From 42d7f408661b115f131c0c89188deaf17389962d Mon Sep 17 00:00:00 2001
From: Kevin Zheng
Date: Wed, 23 Jul 2025 14:28:59 +0000
Subject: [PATCH] chore: Removed autogenerated files from the feature branch

---
 google/cloud/bigtable_admin/__init__.py | 451 -
 google/cloud/bigtable_admin/py.typed | 2 -
 google/cloud/bigtable_admin_v2/__init__.py | 274 -
 .../bigtable_admin_v2/gapic_metadata.json | 1037 -
 google/cloud/bigtable_admin_v2/py.typed | 2 -
 .../bigtable_admin_v2/services/__init__.py | 15 -
 .../bigtable_instance_admin/__init__.py | 22 -
 .../bigtable_instance_admin/async_client.py | 4340 ---
 .../bigtable_instance_admin/client.py | 4818 ---
 .../bigtable_instance_admin/pagers.py | 681 -
 .../transports/README.rst | 9 -
 .../transports/__init__.py | 40 -
 .../transports/base.py | 755 -
 .../transports/grpc.py | 1248 -
 .../transports/grpc_asyncio.py | 1579 -
 .../transports/rest.py | 6824 ----
 .../transports/rest_base.py | 1746 -
 .../services/bigtable_table_admin/__init__.py | 22 -
 .../bigtable_table_admin/async_client.py | 4960 ---
 .../services/bigtable_table_admin/client.py | 5439 ---
 .../services/bigtable_table_admin/pagers.py | 829 -
 .../transports/README.rst | 9 -
 .../transports/__init__.py | 40 -
 .../bigtable_table_admin/transports/base.py | 785 -
 .../bigtable_table_admin/transports/grpc.py | 1395 -
 .../transports/grpc_asyncio.py | 1719 -
 .../bigtable_table_admin/transports/rest.py | 7638 ----
 .../transports/rest_base.py | 2001 --
 .../cloud/bigtable_admin_v2/types/__init__.py | 268 -
 .../types/bigtable_instance_admin.py | 1364 -
 .../types/bigtable_table_admin.py | 1966 -
 .../cloud/bigtable_admin_v2/types/common.py | 81 -
 .../cloud/bigtable_admin_v2/types/instance.py | 826 -
 .../bigtable_admin_v2/types/oneof_message.py | 111 -
 google/cloud/bigtable_admin_v2/types/table.py | 1114 -
 google/cloud/bigtable_admin_v2/types/types.py | 842 -
 ...instance_admin_create_app_profile_async.py | 57 -
 ..._instance_admin_create_app_profile_sync.py | 57 -
 ...ble_instance_admin_create_cluster_async.py | 57 -
 ...able_instance_admin_create_cluster_sync.py | 57 -
 ...le_instance_admin_create_instance_async.py | 61 -
 ...ble_instance_admin_create_instance_sync.py | 61 -
 ...nstance_admin_create_logical_view_async.py | 61 -
 ...instance_admin_create_logical_view_sync.py | 61 -
 ...ce_admin_create_materialized_view_async.py | 61 -
 ...nce_admin_create_materialized_view_sync.py | 61 -
 ...instance_admin_delete_app_profile_async.py | 51 -
 ..._instance_admin_delete_app_profile_sync.py | 51 -
 ...ble_instance_admin_delete_cluster_async.py | 50 -
 ...able_instance_admin_delete_cluster_sync.py | 50 -
 ...le_instance_admin_delete_instance_async.py | 50 -
 ...ble_instance_admin_delete_instance_sync.py | 50 -
 ...nstance_admin_delete_logical_view_async.py | 50 -
 ...instance_admin_delete_logical_view_sync.py | 50 -
 ...ce_admin_delete_materialized_view_async.py | 50 -
 ...nce_admin_delete_materialized_view_sync.py | 50 -
 ...le_instance_admin_get_app_profile_async.py | 52 -
 ...ble_instance_admin_get_app_profile_sync.py | 52 -
 ...gtable_instance_admin_get_cluster_async.py | 52 -
 ...igtable_instance_admin_get_cluster_sync.py | 52 -
 ...ble_instance_admin_get_iam_policy_async.py | 53 -
 ...able_instance_admin_get_iam_policy_sync.py | 53 -
 ...table_instance_admin_get_instance_async.py | 52 -
 ...gtable_instance_admin_get_instance_sync.py | 52 -
 ...e_instance_admin_get_logical_view_async.py | 52 -
 ...le_instance_admin_get_logical_view_sync.py | 52 -
 ...tance_admin_get_materialized_view_async.py | 52 -
 ...stance_admin_get_materialized_view_sync.py | 52 -
 ..._instance_admin_list_app_profiles_async.py | 53 -
 ...e_instance_admin_list_app_profiles_sync.py | 53 -
 ...able_instance_admin_list_clusters_async.py | 52 -
 ...table_instance_admin_list_clusters_sync.py | 52 -
 ...e_instance_admin_list_hot_tablets_async.py | 53 -
 ...le_instance_admin_list_hot_tablets_sync.py | 53 -
 ...ble_instance_admin_list_instances_async.py | 52 -
 ...able_instance_admin_list_instances_sync.py | 52 -
 ...instance_admin_list_logical_views_async.py | 53 -
 ..._instance_admin_list_logical_views_sync.py | 53 -
 ...nce_admin_list_materialized_views_async.py | 53 -
 ...ance_admin_list_materialized_views_sync.py | 53 -
 ...ance_admin_partial_update_cluster_async.py | 55 -
 ...tance_admin_partial_update_cluster_sync.py | 55 -
 ...nce_admin_partial_update_instance_async.py | 59 -
 ...ance_admin_partial_update_instance_sync.py | 59 -
 ...ble_instance_admin_set_iam_policy_async.py | 53 -
 ...able_instance_admin_set_iam_policy_sync.py | 53 -
 ...stance_admin_test_iam_permissions_async.py | 54 -
 ...nstance_admin_test_iam_permissions_sync.py | 54 -
 ...instance_admin_update_app_profile_async.py | 59 -
 ..._instance_admin_update_app_profile_sync.py | 59 -
 ...ble_instance_admin_update_cluster_async.py | 55 -
 ...able_instance_admin_update_cluster_sync.py | 55 -
 ...le_instance_admin_update_instance_async.py | 52 -
 ...ble_instance_admin_update_instance_sync.py | 52 -
 ...nstance_admin_update_logical_view_async.py | 59 -
 ...instance_admin_update_logical_view_sync.py | 59 -
 ...ce_admin_update_materialized_view_async.py | 59 -
 ...nce_admin_update_materialized_view_sync.py | 59 -
 ...ble_table_admin_check_consistency_async.py | 53 -
 ...able_table_admin_check_consistency_sync.py | 53 -
 ..._bigtable_table_admin_copy_backup_async.py | 58 -
 ...d_bigtable_table_admin_copy_backup_sync.py | 58 -
 ...able_admin_create_authorized_view_async.py | 57 -
 ...table_admin_create_authorized_view_sync.py | 57 -
 ...igtable_table_admin_create_backup_async.py | 61 -
 ...bigtable_table_admin_create_backup_sync.py | 61 -
 ...min_create_schema_bundle_async_internal.py | 61 -
 ...dmin_create_schema_bundle_sync_internal.py | 61 -
 ...bigtable_table_admin_create_table_async.py | 53 -
 ..._admin_create_table_from_snapshot_async.py | 58 -
 ...e_admin_create_table_from_snapshot_sync.py | 58 -
 ..._bigtable_table_admin_create_table_sync.py | 53 -
 ...able_admin_delete_authorized_view_async.py | 50 -
 ...table_admin_delete_authorized_view_sync.py | 50 -
 ...igtable_table_admin_delete_backup_async.py | 50 -
 ...bigtable_table_admin_delete_backup_sync.py | 50 -
 ...min_delete_schema_bundle_async_internal.py | 50 -
 ...dmin_delete_schema_bundle_sync_internal.py | 50 -
 ...le_admin_delete_snapshot_async_internal.py | 50 -
 ...ble_admin_delete_snapshot_sync_internal.py | 50 -
 ...bigtable_table_admin_delete_table_async.py | 50 -
 ..._bigtable_table_admin_delete_table_sync.py | 50 -
 ...gtable_table_admin_drop_row_range_async.py | 51 -
 ...igtable_table_admin_drop_row_range_sync.py | 51 -
 ..._admin_generate_consistency_token_async.py | 52 -
 ...e_admin_generate_consistency_token_sync.py | 52 -
 ...e_table_admin_get_authorized_view_async.py | 52 -
 ...le_table_admin_get_authorized_view_sync.py | 52 -
 ...d_bigtable_table_admin_get_backup_async.py | 52 -
 ...ed_bigtable_table_admin_get_backup_sync.py | 52 -
 ...gtable_table_admin_get_iam_policy_async.py | 53 -
 ...igtable_table_admin_get_iam_policy_sync.py | 53 -
 ..._admin_get_schema_bundle_async_internal.py | 52 -
 ...e_admin_get_schema_bundle_sync_internal.py | 52 -
 ...table_admin_get_snapshot_async_internal.py | 52 -
 ..._table_admin_get_snapshot_sync_internal.py | 52 -
 ...ed_bigtable_table_admin_get_table_async.py | 52 -
 ...ted_bigtable_table_admin_get_table_sync.py | 52 -
 ...table_admin_list_authorized_views_async.py | 53 -
 ..._table_admin_list_authorized_views_sync.py | 53 -
 ...bigtable_table_admin_list_backups_async.py | 53 -
 ..._bigtable_table_admin_list_backups_sync.py | 53 -
 ...dmin_list_schema_bundles_async_internal.py | 53 -
 ...admin_list_schema_bundles_sync_internal.py | 53 -
 ...ble_admin_list_snapshots_async_internal.py | 53 -
 ...able_admin_list_snapshots_sync_internal.py | 53 -
 ..._bigtable_table_admin_list_tables_async.py | 53 -
 ...d_bigtable_table_admin_list_tables_sync.py | 53 -
 ...n_modify_column_families_async_internal.py | 52 -
 ...in_modify_column_families_sync_internal.py | 52 -
 ...able_admin_restore_table_async_internal.py | 58 -
 ...table_admin_restore_table_sync_internal.py | 58 -
 ...gtable_table_admin_set_iam_policy_async.py | 53 -
 ...igtable_table_admin_set_iam_policy_sync.py | 53 -
 ...ble_admin_snapshot_table_async_internal.py | 58 -
 ...able_admin_snapshot_table_sync_internal.py | 58 -
 ..._table_admin_test_iam_permissions_async.py | 54 -
 ...e_table_admin_test_iam_permissions_sync.py | 54 -
 ...gtable_table_admin_undelete_table_async.py | 56 -
 ...igtable_table_admin_undelete_table_sync.py | 56 -
 ...able_admin_update_authorized_view_async.py | 55 -
 ...table_admin_update_authorized_view_sync.py | 55 -
 ...igtable_table_admin_update_backup_async.py | 55 -
 ...bigtable_table_admin_update_backup_sync.py | 55 -
 ...min_update_schema_bundle_async_internal.py | 59 -
 ...dmin_update_schema_bundle_sync_internal.py | 59 -
 ...bigtable_table_admin_update_table_async.py | 55 -
 ..._bigtable_table_admin_update_table_sync.py | 55 -
 ...pet_metadata_google.bigtable.admin.v2.json | 10871 ------
 .../unit/gapic/bigtable_admin_v2/__init__.py | 15 -
 .../test_bigtable_instance_admin.py | 26520 --------------
 .../test_bigtable_table_admin.py | 29631 ----------------
 172 files changed, 129427 deletions(-)
 delete mode 100644 google/cloud/bigtable_admin/__init__.py
 delete mode 100644 google/cloud/bigtable_admin/py.typed
 delete mode 100644 google/cloud/bigtable_admin_v2/__init__.py
 delete mode 100644 google/cloud/bigtable_admin_v2/gapic_metadata.json
 delete mode 100644 google/cloud/bigtable_admin_v2/py.typed
 delete mode 100644 google/cloud/bigtable_admin_v2/services/__init__.py
 delete mode 100644 google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py
 delete mode 100644 google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py
 delete mode 100644 google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py
 delete mode 100644 google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py
 delete mode 100644 google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/README.rst
 delete mode 100644 google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py
 delete mode 100644 google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py
 delete mode 100644 google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py
 delete mode 100644 google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py
 delete mode 100644 google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py
 delete mode 100644 google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest_base.py
 delete mode 100644
google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py delete mode 100644 google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py delete mode 100644 google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py delete mode 100644 google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py delete mode 100644 google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/README.rst delete mode 100644 google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py delete mode 100644 google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py delete mode 100644 google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py delete mode 100644 google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py delete mode 100644 google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py delete mode 100644 google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest_base.py delete mode 100644 google/cloud/bigtable_admin_v2/types/__init__.py delete mode 100644 google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py delete mode 100644 google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py delete mode 100644 google/cloud/bigtable_admin_v2/types/common.py delete mode 100644 google/cloud/bigtable_admin_v2/types/instance.py delete mode 100644 google/cloud/bigtable_admin_v2/types/oneof_message.py delete mode 100644 google/cloud/bigtable_admin_v2/types/table.py delete mode 100644 google/cloud/bigtable_admin_v2/types/types.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_app_profile_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_app_profile_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_cluster_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_cluster_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_instance_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_instance_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_logical_view_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_logical_view_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_materialized_view_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_materialized_view_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_app_profile_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_app_profile_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_cluster_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_cluster_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_instance_async.py delete mode 100644 
samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_instance_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_logical_view_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_logical_view_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_materialized_view_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_materialized_view_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_app_profile_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_app_profile_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_cluster_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_cluster_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_iam_policy_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_iam_policy_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_instance_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_instance_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_logical_view_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_logical_view_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_materialized_view_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_materialized_view_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_app_profiles_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_app_profiles_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_clusters_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_clusters_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_hot_tablets_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_hot_tablets_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_instances_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_instances_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_logical_views_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_logical_views_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_materialized_views_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_materialized_views_sync.py delete mode 100644 
samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_cluster_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_cluster_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_instance_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_instance_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_set_iam_policy_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_set_iam_policy_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_test_iam_permissions_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_test_iam_permissions_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_app_profile_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_app_profile_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_cluster_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_cluster_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_instance_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_instance_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_logical_view_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_logical_view_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_materialized_view_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_materialized_view_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_check_consistency_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_check_consistency_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_copy_backup_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_copy_backup_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_authorized_view_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_authorized_view_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_backup_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_backup_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_schema_bundle_async_internal.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_schema_bundle_sync_internal.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_async.py delete mode 
100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_from_snapshot_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_from_snapshot_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_authorized_view_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_authorized_view_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_backup_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_backup_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_schema_bundle_async_internal.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_schema_bundle_sync_internal.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_snapshot_async_internal.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_snapshot_sync_internal.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_table_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_table_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_drop_row_range_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_drop_row_range_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_generate_consistency_token_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_generate_consistency_token_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_authorized_view_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_authorized_view_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_backup_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_backup_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_iam_policy_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_iam_policy_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_schema_bundle_async_internal.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_schema_bundle_sync_internal.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_snapshot_async_internal.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_snapshot_sync_internal.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_table_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_table_sync.py delete mode 100644 
samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_authorized_views_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_authorized_views_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_backups_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_backups_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_schema_bundles_async_internal.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_schema_bundles_sync_internal.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_snapshots_async_internal.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_snapshots_sync_internal.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_tables_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_tables_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_modify_column_families_async_internal.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_modify_column_families_sync_internal.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_restore_table_async_internal.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_restore_table_sync_internal.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_set_iam_policy_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_set_iam_policy_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_snapshot_table_async_internal.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_snapshot_table_sync_internal.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_test_iam_permissions_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_test_iam_permissions_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_undelete_table_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_undelete_table_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_authorized_view_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_authorized_view_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_backup_async.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_backup_sync.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_schema_bundle_async_internal.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_schema_bundle_sync_internal.py delete mode 100644 samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_table_async.py delete mode 100644 
samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_table_sync.py delete mode 100644 samples/generated_samples/snippet_metadata_google.bigtable.admin.v2.json delete mode 100644 tests/unit/gapic/bigtable_admin_v2/__init__.py delete mode 100644 tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py delete mode 100644 tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py diff --git a/google/cloud/bigtable_admin/__init__.py b/google/cloud/bigtable_admin/__init__.py deleted file mode 100644 index 00353ea96..000000000 --- a/google/cloud/bigtable_admin/__init__.py +++ /dev/null @@ -1,451 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from google.cloud.bigtable_admin import gapic_version as package_version - -__version__ = package_version.__version__ - - -from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.client import ( - BigtableInstanceAdminClient, -) -from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.async_client import ( - BigtableInstanceAdminAsyncClient, -) -from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.client import ( - BaseBigtableTableAdminClient, -) -from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.async_client import ( - BaseBigtableTableAdminAsyncClient, -) - -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( - CreateAppProfileRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( - CreateClusterMetadata, -) -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( - CreateClusterRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( - CreateInstanceMetadata, -) -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( - CreateInstanceRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( - CreateLogicalViewMetadata, -) -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( - CreateLogicalViewRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( - CreateMaterializedViewMetadata, -) -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( - CreateMaterializedViewRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( - DeleteAppProfileRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( - DeleteClusterRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( - DeleteInstanceRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( - DeleteLogicalViewRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( - DeleteMaterializedViewRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( - GetAppProfileRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( 
- GetClusterRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( - GetInstanceRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( - GetLogicalViewRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( - GetMaterializedViewRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( - ListAppProfilesRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( - ListAppProfilesResponse, -) -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( - ListClustersRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( - ListClustersResponse, -) -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( - ListHotTabletsRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( - ListHotTabletsResponse, -) -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( - ListInstancesRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( - ListInstancesResponse, -) -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( - ListLogicalViewsRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( - ListLogicalViewsResponse, -) -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( - ListMaterializedViewsRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( - ListMaterializedViewsResponse, -) -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( - PartialUpdateClusterMetadata, -) -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( - PartialUpdateClusterRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( - PartialUpdateInstanceRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( - UpdateAppProfileMetadata, -) -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( - UpdateAppProfileRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( - UpdateClusterMetadata, -) -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( - UpdateInstanceMetadata, -) -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( - UpdateLogicalViewMetadata, -) -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( - UpdateLogicalViewRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( - UpdateMaterializedViewMetadata, -) -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( - UpdateMaterializedViewRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( - CheckConsistencyRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( - CheckConsistencyResponse, -) -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import CopyBackupMetadata -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import CopyBackupRequest -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( - CreateAuthorizedViewMetadata, -) -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( - CreateAuthorizedViewRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( - CreateBackupMetadata, -) -from 
google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( - CreateBackupRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( - CreateSchemaBundleMetadata, -) -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( - CreateSchemaBundleRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( - CreateTableFromSnapshotMetadata, -) -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( - CreateTableFromSnapshotRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import CreateTableRequest -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( - DataBoostReadLocalWrites, -) -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( - DeleteAuthorizedViewRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( - DeleteBackupRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( - DeleteSchemaBundleRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( - DeleteSnapshotRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import DeleteTableRequest -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( - DropRowRangeRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( - GenerateConsistencyTokenRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( - GenerateConsistencyTokenResponse, -) -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( - GetAuthorizedViewRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import GetBackupRequest -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( - GetSchemaBundleRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import GetSnapshotRequest -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import GetTableRequest -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( - ListAuthorizedViewsRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( - ListAuthorizedViewsResponse, -) -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ListBackupsRequest -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( - ListBackupsResponse, -) -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( - ListSchemaBundlesRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( - ListSchemaBundlesResponse, -) -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( - ListSnapshotsRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( - ListSnapshotsResponse, -) -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ListTablesRequest -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ListTablesResponse -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( - ModifyColumnFamiliesRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( - OptimizeRestoredTableMetadata, -) -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( - RestoreTableMetadata, -) -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( - RestoreTableRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( - SnapshotTableMetadata, -) -from 
google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( - SnapshotTableRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( - StandardReadRemoteWrites, -) -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( - UndeleteTableMetadata, -) -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( - UndeleteTableRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( - UpdateAuthorizedViewMetadata, -) -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( - UpdateAuthorizedViewRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( - UpdateBackupRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( - UpdateSchemaBundleMetadata, -) -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( - UpdateSchemaBundleRequest, -) -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( - UpdateTableMetadata, -) -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import UpdateTableRequest -from google.cloud.bigtable_admin_v2.types.common import OperationProgress -from google.cloud.bigtable_admin_v2.types.common import StorageType -from google.cloud.bigtable_admin_v2.types.instance import AppProfile -from google.cloud.bigtable_admin_v2.types.instance import AutoscalingLimits -from google.cloud.bigtable_admin_v2.types.instance import AutoscalingTargets -from google.cloud.bigtable_admin_v2.types.instance import Cluster -from google.cloud.bigtable_admin_v2.types.instance import HotTablet -from google.cloud.bigtable_admin_v2.types.instance import Instance -from google.cloud.bigtable_admin_v2.types.instance import LogicalView -from google.cloud.bigtable_admin_v2.types.instance import MaterializedView -from google.cloud.bigtable_admin_v2.types.table import AuthorizedView -from google.cloud.bigtable_admin_v2.types.table import Backup -from google.cloud.bigtable_admin_v2.types.table import BackupInfo -from google.cloud.bigtable_admin_v2.types.table import ChangeStreamConfig -from google.cloud.bigtable_admin_v2.types.table import ColumnFamily -from google.cloud.bigtable_admin_v2.types.table import EncryptionInfo -from google.cloud.bigtable_admin_v2.types.table import GcRule -from google.cloud.bigtable_admin_v2.types.table import ProtoSchema -from google.cloud.bigtable_admin_v2.types.table import RestoreInfo -from google.cloud.bigtable_admin_v2.types.table import SchemaBundle -from google.cloud.bigtable_admin_v2.types.table import Snapshot -from google.cloud.bigtable_admin_v2.types.table import Table -from google.cloud.bigtable_admin_v2.types.table import RestoreSourceType -from google.cloud.bigtable_admin_v2.types.types import Type - -__all__ = ( - "BigtableInstanceAdminClient", - "BigtableInstanceAdminAsyncClient", - "BaseBigtableTableAdminClient", - "BaseBigtableTableAdminAsyncClient", - "CreateAppProfileRequest", - "CreateClusterMetadata", - "CreateClusterRequest", - "CreateInstanceMetadata", - "CreateInstanceRequest", - "CreateLogicalViewMetadata", - "CreateLogicalViewRequest", - "CreateMaterializedViewMetadata", - "CreateMaterializedViewRequest", - "DeleteAppProfileRequest", - "DeleteClusterRequest", - "DeleteInstanceRequest", - "DeleteLogicalViewRequest", - "DeleteMaterializedViewRequest", - "GetAppProfileRequest", - "GetClusterRequest", - "GetInstanceRequest", - "GetLogicalViewRequest", - "GetMaterializedViewRequest", - "ListAppProfilesRequest", - "ListAppProfilesResponse", - 
"ListClustersRequest", - "ListClustersResponse", - "ListHotTabletsRequest", - "ListHotTabletsResponse", - "ListInstancesRequest", - "ListInstancesResponse", - "ListLogicalViewsRequest", - "ListLogicalViewsResponse", - "ListMaterializedViewsRequest", - "ListMaterializedViewsResponse", - "PartialUpdateClusterMetadata", - "PartialUpdateClusterRequest", - "PartialUpdateInstanceRequest", - "UpdateAppProfileMetadata", - "UpdateAppProfileRequest", - "UpdateClusterMetadata", - "UpdateInstanceMetadata", - "UpdateLogicalViewMetadata", - "UpdateLogicalViewRequest", - "UpdateMaterializedViewMetadata", - "UpdateMaterializedViewRequest", - "CheckConsistencyRequest", - "CheckConsistencyResponse", - "CopyBackupMetadata", - "CopyBackupRequest", - "CreateAuthorizedViewMetadata", - "CreateAuthorizedViewRequest", - "CreateBackupMetadata", - "CreateBackupRequest", - "CreateSchemaBundleMetadata", - "CreateSchemaBundleRequest", - "CreateTableFromSnapshotMetadata", - "CreateTableFromSnapshotRequest", - "CreateTableRequest", - "DataBoostReadLocalWrites", - "DeleteAuthorizedViewRequest", - "DeleteBackupRequest", - "DeleteSchemaBundleRequest", - "DeleteSnapshotRequest", - "DeleteTableRequest", - "DropRowRangeRequest", - "GenerateConsistencyTokenRequest", - "GenerateConsistencyTokenResponse", - "GetAuthorizedViewRequest", - "GetBackupRequest", - "GetSchemaBundleRequest", - "GetSnapshotRequest", - "GetTableRequest", - "ListAuthorizedViewsRequest", - "ListAuthorizedViewsResponse", - "ListBackupsRequest", - "ListBackupsResponse", - "ListSchemaBundlesRequest", - "ListSchemaBundlesResponse", - "ListSnapshotsRequest", - "ListSnapshotsResponse", - "ListTablesRequest", - "ListTablesResponse", - "ModifyColumnFamiliesRequest", - "OptimizeRestoredTableMetadata", - "RestoreTableMetadata", - "RestoreTableRequest", - "SnapshotTableMetadata", - "SnapshotTableRequest", - "StandardReadRemoteWrites", - "UndeleteTableMetadata", - "UndeleteTableRequest", - "UpdateAuthorizedViewMetadata", - "UpdateAuthorizedViewRequest", - "UpdateBackupRequest", - "UpdateSchemaBundleMetadata", - "UpdateSchemaBundleRequest", - "UpdateTableMetadata", - "UpdateTableRequest", - "OperationProgress", - "StorageType", - "AppProfile", - "AutoscalingLimits", - "AutoscalingTargets", - "Cluster", - "HotTablet", - "Instance", - "LogicalView", - "MaterializedView", - "AuthorizedView", - "Backup", - "BackupInfo", - "ChangeStreamConfig", - "ColumnFamily", - "EncryptionInfo", - "GcRule", - "ProtoSchema", - "RestoreInfo", - "SchemaBundle", - "Snapshot", - "Table", - "RestoreSourceType", - "Type", -) - -import google.cloud.bigtable_admin_v2.overlay # noqa: F401 -from google.cloud.bigtable_admin_v2.overlay import * # noqa: F401, F403 - -__all__ += google.cloud.bigtable_admin_v2.overlay.__all__ diff --git a/google/cloud/bigtable_admin/py.typed b/google/cloud/bigtable_admin/py.typed deleted file mode 100644 index bc26f2069..000000000 --- a/google/cloud/bigtable_admin/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-bigtable-admin package uses inline types. diff --git a/google/cloud/bigtable_admin_v2/__init__.py b/google/cloud/bigtable_admin_v2/__init__.py deleted file mode 100644 index 713b2408f..000000000 --- a/google/cloud/bigtable_admin_v2/__init__.py +++ /dev/null @@ -1,274 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from google.cloud.bigtable_admin_v2 import gapic_version as package_version - -__version__ = package_version.__version__ - - -from .services.bigtable_instance_admin import BigtableInstanceAdminClient -from .services.bigtable_instance_admin import BigtableInstanceAdminAsyncClient -from .services.bigtable_table_admin import BaseBigtableTableAdminClient -from .services.bigtable_table_admin import BaseBigtableTableAdminAsyncClient - -from .types.bigtable_instance_admin import CreateAppProfileRequest -from .types.bigtable_instance_admin import CreateClusterMetadata -from .types.bigtable_instance_admin import CreateClusterRequest -from .types.bigtable_instance_admin import CreateInstanceMetadata -from .types.bigtable_instance_admin import CreateInstanceRequest -from .types.bigtable_instance_admin import CreateLogicalViewMetadata -from .types.bigtable_instance_admin import CreateLogicalViewRequest -from .types.bigtable_instance_admin import CreateMaterializedViewMetadata -from .types.bigtable_instance_admin import CreateMaterializedViewRequest -from .types.bigtable_instance_admin import DeleteAppProfileRequest -from .types.bigtable_instance_admin import DeleteClusterRequest -from .types.bigtable_instance_admin import DeleteInstanceRequest -from .types.bigtable_instance_admin import DeleteLogicalViewRequest -from .types.bigtable_instance_admin import DeleteMaterializedViewRequest -from .types.bigtable_instance_admin import GetAppProfileRequest -from .types.bigtable_instance_admin import GetClusterRequest -from .types.bigtable_instance_admin import GetInstanceRequest -from .types.bigtable_instance_admin import GetLogicalViewRequest -from .types.bigtable_instance_admin import GetMaterializedViewRequest -from .types.bigtable_instance_admin import ListAppProfilesRequest -from .types.bigtable_instance_admin import ListAppProfilesResponse -from .types.bigtable_instance_admin import ListClustersRequest -from .types.bigtable_instance_admin import ListClustersResponse -from .types.bigtable_instance_admin import ListHotTabletsRequest -from .types.bigtable_instance_admin import ListHotTabletsResponse -from .types.bigtable_instance_admin import ListInstancesRequest -from .types.bigtable_instance_admin import ListInstancesResponse -from .types.bigtable_instance_admin import ListLogicalViewsRequest -from .types.bigtable_instance_admin import ListLogicalViewsResponse -from .types.bigtable_instance_admin import ListMaterializedViewsRequest -from .types.bigtable_instance_admin import ListMaterializedViewsResponse -from .types.bigtable_instance_admin import PartialUpdateClusterMetadata -from .types.bigtable_instance_admin import PartialUpdateClusterRequest -from .types.bigtable_instance_admin import PartialUpdateInstanceRequest -from .types.bigtable_instance_admin import UpdateAppProfileMetadata -from .types.bigtable_instance_admin import UpdateAppProfileRequest -from .types.bigtable_instance_admin import UpdateClusterMetadata -from .types.bigtable_instance_admin import UpdateInstanceMetadata -from .types.bigtable_instance_admin import UpdateLogicalViewMetadata -from .types.bigtable_instance_admin 
import UpdateLogicalViewRequest -from .types.bigtable_instance_admin import UpdateMaterializedViewMetadata -from .types.bigtable_instance_admin import UpdateMaterializedViewRequest -from .types.bigtable_table_admin import CheckConsistencyRequest -from .types.bigtable_table_admin import CheckConsistencyResponse -from .types.bigtable_table_admin import CopyBackupMetadata -from .types.bigtable_table_admin import CopyBackupRequest -from .types.bigtable_table_admin import CreateAuthorizedViewMetadata -from .types.bigtable_table_admin import CreateAuthorizedViewRequest -from .types.bigtable_table_admin import CreateBackupMetadata -from .types.bigtable_table_admin import CreateBackupRequest -from .types.bigtable_table_admin import CreateSchemaBundleMetadata -from .types.bigtable_table_admin import CreateSchemaBundleRequest -from .types.bigtable_table_admin import CreateTableFromSnapshotMetadata -from .types.bigtable_table_admin import CreateTableFromSnapshotRequest -from .types.bigtable_table_admin import CreateTableRequest -from .types.bigtable_table_admin import DataBoostReadLocalWrites -from .types.bigtable_table_admin import DeleteAuthorizedViewRequest -from .types.bigtable_table_admin import DeleteBackupRequest -from .types.bigtable_table_admin import DeleteSchemaBundleRequest -from .types.bigtable_table_admin import DeleteSnapshotRequest -from .types.bigtable_table_admin import DeleteTableRequest -from .types.bigtable_table_admin import DropRowRangeRequest -from .types.bigtable_table_admin import GenerateConsistencyTokenRequest -from .types.bigtable_table_admin import GenerateConsistencyTokenResponse -from .types.bigtable_table_admin import GetAuthorizedViewRequest -from .types.bigtable_table_admin import GetBackupRequest -from .types.bigtable_table_admin import GetSchemaBundleRequest -from .types.bigtable_table_admin import GetSnapshotRequest -from .types.bigtable_table_admin import GetTableRequest -from .types.bigtable_table_admin import ListAuthorizedViewsRequest -from .types.bigtable_table_admin import ListAuthorizedViewsResponse -from .types.bigtable_table_admin import ListBackupsRequest -from .types.bigtable_table_admin import ListBackupsResponse -from .types.bigtable_table_admin import ListSchemaBundlesRequest -from .types.bigtable_table_admin import ListSchemaBundlesResponse -from .types.bigtable_table_admin import ListSnapshotsRequest -from .types.bigtable_table_admin import ListSnapshotsResponse -from .types.bigtable_table_admin import ListTablesRequest -from .types.bigtable_table_admin import ListTablesResponse -from .types.bigtable_table_admin import ModifyColumnFamiliesRequest -from .types.bigtable_table_admin import OptimizeRestoredTableMetadata -from .types.bigtable_table_admin import RestoreTableMetadata -from .types.bigtable_table_admin import RestoreTableRequest -from .types.bigtable_table_admin import SnapshotTableMetadata -from .types.bigtable_table_admin import SnapshotTableRequest -from .types.bigtable_table_admin import StandardReadRemoteWrites -from .types.bigtable_table_admin import UndeleteTableMetadata -from .types.bigtable_table_admin import UndeleteTableRequest -from .types.bigtable_table_admin import UpdateAuthorizedViewMetadata -from .types.bigtable_table_admin import UpdateAuthorizedViewRequest -from .types.bigtable_table_admin import UpdateBackupRequest -from .types.bigtable_table_admin import UpdateSchemaBundleMetadata -from .types.bigtable_table_admin import UpdateSchemaBundleRequest -from .types.bigtable_table_admin import UpdateTableMetadata -from 
.types.bigtable_table_admin import UpdateTableRequest -from .types.common import OperationProgress -from .types.common import StorageType -from .types.instance import AppProfile -from .types.instance import AutoscalingLimits -from .types.instance import AutoscalingTargets -from .types.instance import Cluster -from .types.instance import HotTablet -from .types.instance import Instance -from .types.instance import LogicalView -from .types.instance import MaterializedView -from .types.table import AuthorizedView -from .types.table import Backup -from .types.table import BackupInfo -from .types.table import ChangeStreamConfig -from .types.table import ColumnFamily -from .types.table import EncryptionInfo -from .types.table import GcRule -from .types.table import ProtoSchema -from .types.table import RestoreInfo -from .types.table import SchemaBundle -from .types.table import Snapshot -from .types.table import Table -from .types.table import RestoreSourceType -from .types.types import Type - -__all__ = ( - "BaseBigtableTableAdminAsyncClient", - "BigtableInstanceAdminAsyncClient", - "AppProfile", - "AuthorizedView", - "AutoscalingLimits", - "AutoscalingTargets", - "Backup", - "BackupInfo", - "BaseBigtableTableAdminClient", - "BigtableInstanceAdminClient", - "ChangeStreamConfig", - "CheckConsistencyRequest", - "CheckConsistencyResponse", - "Cluster", - "ColumnFamily", - "CopyBackupMetadata", - "CopyBackupRequest", - "CreateAppProfileRequest", - "CreateAuthorizedViewMetadata", - "CreateAuthorizedViewRequest", - "CreateBackupMetadata", - "CreateBackupRequest", - "CreateClusterMetadata", - "CreateClusterRequest", - "CreateInstanceMetadata", - "CreateInstanceRequest", - "CreateLogicalViewMetadata", - "CreateLogicalViewRequest", - "CreateMaterializedViewMetadata", - "CreateMaterializedViewRequest", - "CreateSchemaBundleMetadata", - "CreateSchemaBundleRequest", - "CreateTableFromSnapshotMetadata", - "CreateTableFromSnapshotRequest", - "CreateTableRequest", - "DataBoostReadLocalWrites", - "DeleteAppProfileRequest", - "DeleteAuthorizedViewRequest", - "DeleteBackupRequest", - "DeleteClusterRequest", - "DeleteInstanceRequest", - "DeleteLogicalViewRequest", - "DeleteMaterializedViewRequest", - "DeleteSchemaBundleRequest", - "DeleteSnapshotRequest", - "DeleteTableRequest", - "DropRowRangeRequest", - "EncryptionInfo", - "GcRule", - "GenerateConsistencyTokenRequest", - "GenerateConsistencyTokenResponse", - "GetAppProfileRequest", - "GetAuthorizedViewRequest", - "GetBackupRequest", - "GetClusterRequest", - "GetInstanceRequest", - "GetLogicalViewRequest", - "GetMaterializedViewRequest", - "GetSchemaBundleRequest", - "GetSnapshotRequest", - "GetTableRequest", - "HotTablet", - "Instance", - "ListAppProfilesRequest", - "ListAppProfilesResponse", - "ListAuthorizedViewsRequest", - "ListAuthorizedViewsResponse", - "ListBackupsRequest", - "ListBackupsResponse", - "ListClustersRequest", - "ListClustersResponse", - "ListHotTabletsRequest", - "ListHotTabletsResponse", - "ListInstancesRequest", - "ListInstancesResponse", - "ListLogicalViewsRequest", - "ListLogicalViewsResponse", - "ListMaterializedViewsRequest", - "ListMaterializedViewsResponse", - "ListSchemaBundlesRequest", - "ListSchemaBundlesResponse", - "ListSnapshotsRequest", - "ListSnapshotsResponse", - "ListTablesRequest", - "ListTablesResponse", - "LogicalView", - "MaterializedView", - "ModifyColumnFamiliesRequest", - "OperationProgress", - "OptimizeRestoredTableMetadata", - "PartialUpdateClusterMetadata", - "PartialUpdateClusterRequest", - 
"PartialUpdateInstanceRequest", - "ProtoSchema", - "RestoreInfo", - "RestoreSourceType", - "RestoreTableMetadata", - "RestoreTableRequest", - "SchemaBundle", - "Snapshot", - "SnapshotTableMetadata", - "SnapshotTableRequest", - "StandardReadRemoteWrites", - "StorageType", - "Table", - "Type", - "UndeleteTableMetadata", - "UndeleteTableRequest", - "UpdateAppProfileMetadata", - "UpdateAppProfileRequest", - "UpdateAuthorizedViewMetadata", - "UpdateAuthorizedViewRequest", - "UpdateBackupRequest", - "UpdateClusterMetadata", - "UpdateInstanceMetadata", - "UpdateLogicalViewMetadata", - "UpdateLogicalViewRequest", - "UpdateMaterializedViewMetadata", - "UpdateMaterializedViewRequest", - "UpdateSchemaBundleMetadata", - "UpdateSchemaBundleRequest", - "UpdateTableMetadata", - "UpdateTableRequest", -) - -from .overlay import * # noqa: F403 - -__all__ += overlay.__all__ # noqa: F405 diff --git a/google/cloud/bigtable_admin_v2/gapic_metadata.json b/google/cloud/bigtable_admin_v2/gapic_metadata.json deleted file mode 100644 index d5b64ed18..000000000 --- a/google/cloud/bigtable_admin_v2/gapic_metadata.json +++ /dev/null @@ -1,1037 +0,0 @@ - { - "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", - "language": "python", - "libraryPackage": "google.cloud.bigtable_admin_v2", - "protoPackage": "google.bigtable.admin.v2", - "schema": "1.0", - "services": { - "BigtableInstanceAdmin": { - "clients": { - "grpc": { - "libraryClient": "BigtableInstanceAdminClient", - "rpcs": { - "CreateAppProfile": { - "methods": [ - "create_app_profile" - ] - }, - "CreateCluster": { - "methods": [ - "create_cluster" - ] - }, - "CreateInstance": { - "methods": [ - "create_instance" - ] - }, - "CreateLogicalView": { - "methods": [ - "create_logical_view" - ] - }, - "CreateMaterializedView": { - "methods": [ - "create_materialized_view" - ] - }, - "DeleteAppProfile": { - "methods": [ - "delete_app_profile" - ] - }, - "DeleteCluster": { - "methods": [ - "delete_cluster" - ] - }, - "DeleteInstance": { - "methods": [ - "delete_instance" - ] - }, - "DeleteLogicalView": { - "methods": [ - "delete_logical_view" - ] - }, - "DeleteMaterializedView": { - "methods": [ - "delete_materialized_view" - ] - }, - "GetAppProfile": { - "methods": [ - "get_app_profile" - ] - }, - "GetCluster": { - "methods": [ - "get_cluster" - ] - }, - "GetIamPolicy": { - "methods": [ - "get_iam_policy" - ] - }, - "GetInstance": { - "methods": [ - "get_instance" - ] - }, - "GetLogicalView": { - "methods": [ - "get_logical_view" - ] - }, - "GetMaterializedView": { - "methods": [ - "get_materialized_view" - ] - }, - "ListAppProfiles": { - "methods": [ - "list_app_profiles" - ] - }, - "ListClusters": { - "methods": [ - "list_clusters" - ] - }, - "ListHotTablets": { - "methods": [ - "list_hot_tablets" - ] - }, - "ListInstances": { - "methods": [ - "list_instances" - ] - }, - "ListLogicalViews": { - "methods": [ - "list_logical_views" - ] - }, - "ListMaterializedViews": { - "methods": [ - "list_materialized_views" - ] - }, - "PartialUpdateCluster": { - "methods": [ - "partial_update_cluster" - ] - }, - "PartialUpdateInstance": { - "methods": [ - "partial_update_instance" - ] - }, - "SetIamPolicy": { - "methods": [ - "set_iam_policy" - ] - }, - "TestIamPermissions": { - "methods": [ - "test_iam_permissions" - ] - }, - "UpdateAppProfile": { - "methods": [ - "update_app_profile" - ] - }, - "UpdateCluster": { - "methods": [ - "update_cluster" - ] - }, - "UpdateInstance": { - "methods": [ - "update_instance" - ] - }, - 
"UpdateLogicalView": { - "methods": [ - "update_logical_view" - ] - }, - "UpdateMaterializedView": { - "methods": [ - "update_materialized_view" - ] - } - } - }, - "grpc-async": { - "libraryClient": "BigtableInstanceAdminAsyncClient", - "rpcs": { - "CreateAppProfile": { - "methods": [ - "create_app_profile" - ] - }, - "CreateCluster": { - "methods": [ - "create_cluster" - ] - }, - "CreateInstance": { - "methods": [ - "create_instance" - ] - }, - "CreateLogicalView": { - "methods": [ - "create_logical_view" - ] - }, - "CreateMaterializedView": { - "methods": [ - "create_materialized_view" - ] - }, - "DeleteAppProfile": { - "methods": [ - "delete_app_profile" - ] - }, - "DeleteCluster": { - "methods": [ - "delete_cluster" - ] - }, - "DeleteInstance": { - "methods": [ - "delete_instance" - ] - }, - "DeleteLogicalView": { - "methods": [ - "delete_logical_view" - ] - }, - "DeleteMaterializedView": { - "methods": [ - "delete_materialized_view" - ] - }, - "GetAppProfile": { - "methods": [ - "get_app_profile" - ] - }, - "GetCluster": { - "methods": [ - "get_cluster" - ] - }, - "GetIamPolicy": { - "methods": [ - "get_iam_policy" - ] - }, - "GetInstance": { - "methods": [ - "get_instance" - ] - }, - "GetLogicalView": { - "methods": [ - "get_logical_view" - ] - }, - "GetMaterializedView": { - "methods": [ - "get_materialized_view" - ] - }, - "ListAppProfiles": { - "methods": [ - "list_app_profiles" - ] - }, - "ListClusters": { - "methods": [ - "list_clusters" - ] - }, - "ListHotTablets": { - "methods": [ - "list_hot_tablets" - ] - }, - "ListInstances": { - "methods": [ - "list_instances" - ] - }, - "ListLogicalViews": { - "methods": [ - "list_logical_views" - ] - }, - "ListMaterializedViews": { - "methods": [ - "list_materialized_views" - ] - }, - "PartialUpdateCluster": { - "methods": [ - "partial_update_cluster" - ] - }, - "PartialUpdateInstance": { - "methods": [ - "partial_update_instance" - ] - }, - "SetIamPolicy": { - "methods": [ - "set_iam_policy" - ] - }, - "TestIamPermissions": { - "methods": [ - "test_iam_permissions" - ] - }, - "UpdateAppProfile": { - "methods": [ - "update_app_profile" - ] - }, - "UpdateCluster": { - "methods": [ - "update_cluster" - ] - }, - "UpdateInstance": { - "methods": [ - "update_instance" - ] - }, - "UpdateLogicalView": { - "methods": [ - "update_logical_view" - ] - }, - "UpdateMaterializedView": { - "methods": [ - "update_materialized_view" - ] - } - } - }, - "rest": { - "libraryClient": "BigtableInstanceAdminClient", - "rpcs": { - "CreateAppProfile": { - "methods": [ - "create_app_profile" - ] - }, - "CreateCluster": { - "methods": [ - "create_cluster" - ] - }, - "CreateInstance": { - "methods": [ - "create_instance" - ] - }, - "CreateLogicalView": { - "methods": [ - "create_logical_view" - ] - }, - "CreateMaterializedView": { - "methods": [ - "create_materialized_view" - ] - }, - "DeleteAppProfile": { - "methods": [ - "delete_app_profile" - ] - }, - "DeleteCluster": { - "methods": [ - "delete_cluster" - ] - }, - "DeleteInstance": { - "methods": [ - "delete_instance" - ] - }, - "DeleteLogicalView": { - "methods": [ - "delete_logical_view" - ] - }, - "DeleteMaterializedView": { - "methods": [ - "delete_materialized_view" - ] - }, - "GetAppProfile": { - "methods": [ - "get_app_profile" - ] - }, - "GetCluster": { - "methods": [ - "get_cluster" - ] - }, - "GetIamPolicy": { - "methods": [ - "get_iam_policy" - ] - }, - "GetInstance": { - "methods": [ - "get_instance" - ] - }, - "GetLogicalView": { - "methods": [ - "get_logical_view" - ] - }, - "GetMaterializedView": 
{ - "methods": [ - "get_materialized_view" - ] - }, - "ListAppProfiles": { - "methods": [ - "list_app_profiles" - ] - }, - "ListClusters": { - "methods": [ - "list_clusters" - ] - }, - "ListHotTablets": { - "methods": [ - "list_hot_tablets" - ] - }, - "ListInstances": { - "methods": [ - "list_instances" - ] - }, - "ListLogicalViews": { - "methods": [ - "list_logical_views" - ] - }, - "ListMaterializedViews": { - "methods": [ - "list_materialized_views" - ] - }, - "PartialUpdateCluster": { - "methods": [ - "partial_update_cluster" - ] - }, - "PartialUpdateInstance": { - "methods": [ - "partial_update_instance" - ] - }, - "SetIamPolicy": { - "methods": [ - "set_iam_policy" - ] - }, - "TestIamPermissions": { - "methods": [ - "test_iam_permissions" - ] - }, - "UpdateAppProfile": { - "methods": [ - "update_app_profile" - ] - }, - "UpdateCluster": { - "methods": [ - "update_cluster" - ] - }, - "UpdateInstance": { - "methods": [ - "update_instance" - ] - }, - "UpdateLogicalView": { - "methods": [ - "update_logical_view" - ] - }, - "UpdateMaterializedView": { - "methods": [ - "update_materialized_view" - ] - } - } - } - } - }, - "BigtableTableAdmin": { - "clients": { - "grpc": { - "libraryClient": "BaseBigtableTableAdminClient", - "rpcs": { - "CheckConsistency": { - "methods": [ - "check_consistency" - ] - }, - "CopyBackup": { - "methods": [ - "copy_backup" - ] - }, - "CreateAuthorizedView": { - "methods": [ - "create_authorized_view" - ] - }, - "CreateBackup": { - "methods": [ - "create_backup" - ] - }, - "CreateSchemaBundle": { - "methods": [ - "_create_schema_bundle" - ] - }, - "CreateTable": { - "methods": [ - "create_table" - ] - }, - "CreateTableFromSnapshot": { - "methods": [ - "create_table_from_snapshot" - ] - }, - "DeleteAuthorizedView": { - "methods": [ - "delete_authorized_view" - ] - }, - "DeleteBackup": { - "methods": [ - "delete_backup" - ] - }, - "DeleteSchemaBundle": { - "methods": [ - "_delete_schema_bundle" - ] - }, - "DeleteSnapshot": { - "methods": [ - "_delete_snapshot" - ] - }, - "DeleteTable": { - "methods": [ - "delete_table" - ] - }, - "DropRowRange": { - "methods": [ - "drop_row_range" - ] - }, - "GenerateConsistencyToken": { - "methods": [ - "generate_consistency_token" - ] - }, - "GetAuthorizedView": { - "methods": [ - "get_authorized_view" - ] - }, - "GetBackup": { - "methods": [ - "get_backup" - ] - }, - "GetIamPolicy": { - "methods": [ - "get_iam_policy" - ] - }, - "GetSchemaBundle": { - "methods": [ - "_get_schema_bundle" - ] - }, - "GetSnapshot": { - "methods": [ - "_get_snapshot" - ] - }, - "GetTable": { - "methods": [ - "get_table" - ] - }, - "ListAuthorizedViews": { - "methods": [ - "list_authorized_views" - ] - }, - "ListBackups": { - "methods": [ - "list_backups" - ] - }, - "ListSchemaBundles": { - "methods": [ - "_list_schema_bundles" - ] - }, - "ListSnapshots": { - "methods": [ - "_list_snapshots" - ] - }, - "ListTables": { - "methods": [ - "list_tables" - ] - }, - "ModifyColumnFamilies": { - "methods": [ - "_modify_column_families" - ] - }, - "RestoreTable": { - "methods": [ - "_restore_table" - ] - }, - "SetIamPolicy": { - "methods": [ - "set_iam_policy" - ] - }, - "SnapshotTable": { - "methods": [ - "_snapshot_table" - ] - }, - "TestIamPermissions": { - "methods": [ - "test_iam_permissions" - ] - }, - "UndeleteTable": { - "methods": [ - "undelete_table" - ] - }, - "UpdateAuthorizedView": { - "methods": [ - "update_authorized_view" - ] - }, - "UpdateBackup": { - "methods": [ - "update_backup" - ] - }, - "UpdateSchemaBundle": { - "methods": [ - 
"_update_schema_bundle" - ] - }, - "UpdateTable": { - "methods": [ - "update_table" - ] - } - } - }, - "grpc-async": { - "libraryClient": "BaseBigtableTableAdminAsyncClient", - "rpcs": { - "CheckConsistency": { - "methods": [ - "check_consistency" - ] - }, - "CopyBackup": { - "methods": [ - "copy_backup" - ] - }, - "CreateAuthorizedView": { - "methods": [ - "create_authorized_view" - ] - }, - "CreateBackup": { - "methods": [ - "create_backup" - ] - }, - "CreateSchemaBundle": { - "methods": [ - "_create_schema_bundle" - ] - }, - "CreateTable": { - "methods": [ - "create_table" - ] - }, - "CreateTableFromSnapshot": { - "methods": [ - "create_table_from_snapshot" - ] - }, - "DeleteAuthorizedView": { - "methods": [ - "delete_authorized_view" - ] - }, - "DeleteBackup": { - "methods": [ - "delete_backup" - ] - }, - "DeleteSchemaBundle": { - "methods": [ - "_delete_schema_bundle" - ] - }, - "DeleteSnapshot": { - "methods": [ - "_delete_snapshot" - ] - }, - "DeleteTable": { - "methods": [ - "delete_table" - ] - }, - "DropRowRange": { - "methods": [ - "drop_row_range" - ] - }, - "GenerateConsistencyToken": { - "methods": [ - "generate_consistency_token" - ] - }, - "GetAuthorizedView": { - "methods": [ - "get_authorized_view" - ] - }, - "GetBackup": { - "methods": [ - "get_backup" - ] - }, - "GetIamPolicy": { - "methods": [ - "get_iam_policy" - ] - }, - "GetSchemaBundle": { - "methods": [ - "_get_schema_bundle" - ] - }, - "GetSnapshot": { - "methods": [ - "_get_snapshot" - ] - }, - "GetTable": { - "methods": [ - "get_table" - ] - }, - "ListAuthorizedViews": { - "methods": [ - "list_authorized_views" - ] - }, - "ListBackups": { - "methods": [ - "list_backups" - ] - }, - "ListSchemaBundles": { - "methods": [ - "_list_schema_bundles" - ] - }, - "ListSnapshots": { - "methods": [ - "_list_snapshots" - ] - }, - "ListTables": { - "methods": [ - "list_tables" - ] - }, - "ModifyColumnFamilies": { - "methods": [ - "_modify_column_families" - ] - }, - "RestoreTable": { - "methods": [ - "_restore_table" - ] - }, - "SetIamPolicy": { - "methods": [ - "set_iam_policy" - ] - }, - "SnapshotTable": { - "methods": [ - "_snapshot_table" - ] - }, - "TestIamPermissions": { - "methods": [ - "test_iam_permissions" - ] - }, - "UndeleteTable": { - "methods": [ - "undelete_table" - ] - }, - "UpdateAuthorizedView": { - "methods": [ - "update_authorized_view" - ] - }, - "UpdateBackup": { - "methods": [ - "update_backup" - ] - }, - "UpdateSchemaBundle": { - "methods": [ - "_update_schema_bundle" - ] - }, - "UpdateTable": { - "methods": [ - "update_table" - ] - } - } - }, - "rest": { - "libraryClient": "BaseBigtableTableAdminClient", - "rpcs": { - "CheckConsistency": { - "methods": [ - "check_consistency" - ] - }, - "CopyBackup": { - "methods": [ - "copy_backup" - ] - }, - "CreateAuthorizedView": { - "methods": [ - "create_authorized_view" - ] - }, - "CreateBackup": { - "methods": [ - "create_backup" - ] - }, - "CreateSchemaBundle": { - "methods": [ - "_create_schema_bundle" - ] - }, - "CreateTable": { - "methods": [ - "create_table" - ] - }, - "CreateTableFromSnapshot": { - "methods": [ - "create_table_from_snapshot" - ] - }, - "DeleteAuthorizedView": { - "methods": [ - "delete_authorized_view" - ] - }, - "DeleteBackup": { - "methods": [ - "delete_backup" - ] - }, - "DeleteSchemaBundle": { - "methods": [ - "_delete_schema_bundle" - ] - }, - "DeleteSnapshot": { - "methods": [ - "_delete_snapshot" - ] - }, - "DeleteTable": { - "methods": [ - "delete_table" - ] - }, - "DropRowRange": { - "methods": [ - "drop_row_range" - ] - }, - 
"GenerateConsistencyToken": { - "methods": [ - "generate_consistency_token" - ] - }, - "GetAuthorizedView": { - "methods": [ - "get_authorized_view" - ] - }, - "GetBackup": { - "methods": [ - "get_backup" - ] - }, - "GetIamPolicy": { - "methods": [ - "get_iam_policy" - ] - }, - "GetSchemaBundle": { - "methods": [ - "_get_schema_bundle" - ] - }, - "GetSnapshot": { - "methods": [ - "_get_snapshot" - ] - }, - "GetTable": { - "methods": [ - "get_table" - ] - }, - "ListAuthorizedViews": { - "methods": [ - "list_authorized_views" - ] - }, - "ListBackups": { - "methods": [ - "list_backups" - ] - }, - "ListSchemaBundles": { - "methods": [ - "_list_schema_bundles" - ] - }, - "ListSnapshots": { - "methods": [ - "_list_snapshots" - ] - }, - "ListTables": { - "methods": [ - "list_tables" - ] - }, - "ModifyColumnFamilies": { - "methods": [ - "_modify_column_families" - ] - }, - "RestoreTable": { - "methods": [ - "_restore_table" - ] - }, - "SetIamPolicy": { - "methods": [ - "set_iam_policy" - ] - }, - "SnapshotTable": { - "methods": [ - "_snapshot_table" - ] - }, - "TestIamPermissions": { - "methods": [ - "test_iam_permissions" - ] - }, - "UndeleteTable": { - "methods": [ - "undelete_table" - ] - }, - "UpdateAuthorizedView": { - "methods": [ - "update_authorized_view" - ] - }, - "UpdateBackup": { - "methods": [ - "update_backup" - ] - }, - "UpdateSchemaBundle": { - "methods": [ - "_update_schema_bundle" - ] - }, - "UpdateTable": { - "methods": [ - "update_table" - ] - } - } - } - } - } - } -} diff --git a/google/cloud/bigtable_admin_v2/py.typed b/google/cloud/bigtable_admin_v2/py.typed deleted file mode 100644 index bc26f2069..000000000 --- a/google/cloud/bigtable_admin_v2/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-bigtable-admin package uses inline types. diff --git a/google/cloud/bigtable_admin_v2/services/__init__.py b/google/cloud/bigtable_admin_v2/services/__init__.py deleted file mode 100644 index cbf94b283..000000000 --- a/google/cloud/bigtable_admin_v2/services/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py deleted file mode 100644 index 20ac9e4fc..000000000 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -from .client import BigtableInstanceAdminClient -from .async_client import BigtableInstanceAdminAsyncClient - -__all__ = ( - "BigtableInstanceAdminClient", - "BigtableInstanceAdminAsyncClient", -) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py deleted file mode 100644 index a1aee2370..000000000 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ /dev/null @@ -1,4340 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import logging as std_logging -from collections import OrderedDict -import re -from typing import ( - Dict, - Callable, - Mapping, - MutableMapping, - MutableSequence, - Optional, - Sequence, - Tuple, - Type, - Union, -) - -from google.cloud.bigtable_admin_v2 import gapic_version as package_version - -from google.api_core.client_options import ClientOptions -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry_async as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore -import google.protobuf - - -try: - OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore - -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import pagers -from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin -from google.cloud.bigtable_admin_v2.types import common -from google.cloud.bigtable_admin_v2.types import instance -from google.cloud.bigtable_admin_v2.types import instance as gba_instance -from google.iam.v1 import iam_policy_pb2 # type: ignore -from google.iam.v1 import policy_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import BigtableInstanceAdminTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import BigtableInstanceAdminGrpcAsyncIOTransport -from .client import BigtableInstanceAdminClient - -try: - from google.api_core import client_logging # type: ignore - - CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER -except ImportError: # pragma: NO COVER - CLIENT_LOGGING_SUPPORTED = False - -_LOGGER = std_logging.getLogger(__name__) - - -class BigtableInstanceAdminAsyncClient: - """Service for creating, configuring, and deleting Cloud - Bigtable Instances and Clusters. Provides access to the Instance - and Cluster schemas only, not the tables' metadata or data - stored in those tables. 
- """ - - _client: BigtableInstanceAdminClient - - # Copy defaults from the synchronous client for use here. - # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. - DEFAULT_ENDPOINT = BigtableInstanceAdminClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = BigtableInstanceAdminClient.DEFAULT_MTLS_ENDPOINT - _DEFAULT_ENDPOINT_TEMPLATE = BigtableInstanceAdminClient._DEFAULT_ENDPOINT_TEMPLATE - _DEFAULT_UNIVERSE = BigtableInstanceAdminClient._DEFAULT_UNIVERSE - - app_profile_path = staticmethod(BigtableInstanceAdminClient.app_profile_path) - parse_app_profile_path = staticmethod( - BigtableInstanceAdminClient.parse_app_profile_path - ) - cluster_path = staticmethod(BigtableInstanceAdminClient.cluster_path) - parse_cluster_path = staticmethod(BigtableInstanceAdminClient.parse_cluster_path) - crypto_key_path = staticmethod(BigtableInstanceAdminClient.crypto_key_path) - parse_crypto_key_path = staticmethod( - BigtableInstanceAdminClient.parse_crypto_key_path - ) - hot_tablet_path = staticmethod(BigtableInstanceAdminClient.hot_tablet_path) - parse_hot_tablet_path = staticmethod( - BigtableInstanceAdminClient.parse_hot_tablet_path - ) - instance_path = staticmethod(BigtableInstanceAdminClient.instance_path) - parse_instance_path = staticmethod(BigtableInstanceAdminClient.parse_instance_path) - logical_view_path = staticmethod(BigtableInstanceAdminClient.logical_view_path) - parse_logical_view_path = staticmethod( - BigtableInstanceAdminClient.parse_logical_view_path - ) - materialized_view_path = staticmethod( - BigtableInstanceAdminClient.materialized_view_path - ) - parse_materialized_view_path = staticmethod( - BigtableInstanceAdminClient.parse_materialized_view_path - ) - table_path = staticmethod(BigtableInstanceAdminClient.table_path) - parse_table_path = staticmethod(BigtableInstanceAdminClient.parse_table_path) - common_billing_account_path = staticmethod( - BigtableInstanceAdminClient.common_billing_account_path - ) - parse_common_billing_account_path = staticmethod( - BigtableInstanceAdminClient.parse_common_billing_account_path - ) - common_folder_path = staticmethod(BigtableInstanceAdminClient.common_folder_path) - parse_common_folder_path = staticmethod( - BigtableInstanceAdminClient.parse_common_folder_path - ) - common_organization_path = staticmethod( - BigtableInstanceAdminClient.common_organization_path - ) - parse_common_organization_path = staticmethod( - BigtableInstanceAdminClient.parse_common_organization_path - ) - common_project_path = staticmethod(BigtableInstanceAdminClient.common_project_path) - parse_common_project_path = staticmethod( - BigtableInstanceAdminClient.parse_common_project_path - ) - common_location_path = staticmethod( - BigtableInstanceAdminClient.common_location_path - ) - parse_common_location_path = staticmethod( - BigtableInstanceAdminClient.parse_common_location_path - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - BigtableInstanceAdminAsyncClient: The constructed client. 
- """ - return BigtableInstanceAdminClient.from_service_account_info.__func__(BigtableInstanceAdminAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - BigtableInstanceAdminAsyncClient: The constructed client. - """ - return BigtableInstanceAdminClient.from_service_account_file.__func__(BigtableInstanceAdminAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @classmethod - def get_mtls_endpoint_and_cert_source( - cls, client_options: Optional[ClientOptions] = None - ): - """Return the API endpoint and client cert source for mutual TLS. - - The client cert source is determined in the following order: - (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the - client cert source is None. - (2) if `client_options.client_cert_source` is provided, use the provided one; if the - default client cert source exists, use the default one; otherwise the client cert - source is None. - - The API endpoint is determined in the following order: - (1) if `client_options.api_endpoint` if provided, use the provided one. - (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the - default mTLS endpoint; if the environment variable is "never", use the default API - endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise - use the default API endpoint. - - More details can be found at https://google.aip.dev/auth/4114. - - Args: - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. Only the `api_endpoint` and `client_cert_source` properties may be used - in this method. - - Returns: - Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the - client cert source to use. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If any errors happen. - """ - return BigtableInstanceAdminClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore - - @property - def transport(self) -> BigtableInstanceAdminTransport: - """Returns the transport used by the client instance. - - Returns: - BigtableInstanceAdminTransport: The transport used by the client instance. - """ - return self._client.transport - - @property - def api_endpoint(self): - """Return the API endpoint used by the client instance. - - Returns: - str: The API endpoint used by the client instance. - """ - return self._client._api_endpoint - - @property - def universe_domain(self) -> str: - """Return the universe domain used by the client instance. - - Returns: - str: The universe domain used - by the client instance. 
- """ - return self._client._universe_domain - - get_transport_class = BigtableInstanceAdminClient.get_transport_class - - def __init__( - self, - *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Optional[ - Union[ - str, - BigtableInstanceAdminTransport, - Callable[..., BigtableInstanceAdminTransport], - ] - ] = "grpc_asyncio", - client_options: Optional[ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the bigtable instance admin async client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Optional[Union[str,BigtableInstanceAdminTransport,Callable[..., BigtableInstanceAdminTransport]]]): - The transport to use, or a Callable that constructs and returns a new transport to use. - If a Callable is given, it will be called with the same set of initialization - arguments as used in the BigtableInstanceAdminTransport constructor. - If set to None, a transport is chosen automatically. - client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): - Custom options for the client. - - 1. The ``api_endpoint`` property can be used to override the - default endpoint provided by the client when ``transport`` is - not explicitly provided. Only if this property is not set and - ``transport`` was not explicitly provided, the endpoint is - determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment - variable, which have one of the following values: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto-switch to the - default mTLS endpoint if client certificate is present; this is - the default value). - - 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide a client certificate for mTLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - 3. The ``universe_domain`` property can be used to override the - default "googleapis.com" universe. Note that ``api_endpoint`` - property still takes precedence; and ``universe_domain`` is - currently not supported for mTLS. - - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. 
- """ - self._client = BigtableInstanceAdminClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - std_logging.DEBUG - ): # pragma: NO COVER - _LOGGER.debug( - "Created client `google.bigtable.admin_v2.BigtableInstanceAdminAsyncClient`.", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "universeDomain": getattr( - self._client._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._client._transport, "_credentials") - else { - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "credentialsType": None, - }, - ) - - async def create_instance( - self, - request: Optional[ - Union[bigtable_instance_admin.CreateInstanceRequest, dict] - ] = None, - *, - parent: Optional[str] = None, - instance_id: Optional[str] = None, - instance: Optional[gba_instance.Instance] = None, - clusters: Optional[MutableMapping[str, gba_instance.Cluster]] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operation_async.AsyncOperation: - r"""Create an instance within a project. - - Note that exactly one of Cluster.serve_nodes and - Cluster.cluster_config.cluster_autoscaling_config can be set. If - serve_nodes is set to non-zero, then the cluster is manually - scaled. If cluster_config.cluster_autoscaling_config is - non-empty, then autoscaling is enabled. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_create_instance(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - instance = bigtable_admin_v2.Instance() - instance.display_name = "display_name_value" - - request = bigtable_admin_v2.CreateInstanceRequest( - parent="parent_value", - instance_id="instance_id_value", - instance=instance, - ) - - # Make the request - operation = client.create_instance(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateInstanceRequest, dict]]): - The request object. Request message for - BigtableInstanceAdmin.CreateInstance. - parent (:class:`str`): - Required. The unique name of the project in which to - create the new instance. Values are of the form - ``projects/{project}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - instance_id (:class:`str`): - Required. 
The ID to be used when referring to the new - instance within its project, e.g., just ``myinstance`` - rather than ``projects/myproject/instances/myinstance``. - - This corresponds to the ``instance_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - instance (:class:`google.cloud.bigtable_admin_v2.types.Instance`): - Required. The instance to create. Fields marked - ``OutputOnly`` must be left blank. - - This corresponds to the ``instance`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - clusters (:class:`MutableMapping[str, google.cloud.bigtable_admin_v2.types.Cluster]`): - Required. The clusters to be created within the - instance, mapped by desired cluster ID, e.g., just - ``mycluster`` rather than - ``projects/myproject/instances/myinstance/clusters/mycluster``. - Fields marked ``OutputOnly`` must be left blank. - - This corresponds to the ``clusters`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Instance` A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and - the resources that serve them. All tables in an - instance are served from all - [Clusters][google.bigtable.admin.v2.Cluster] in the - instance. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [parent, instance_id, instance, clusters] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_instance_admin.CreateInstanceRequest): - request = bigtable_instance_admin.CreateInstanceRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if instance_id is not None: - request.instance_id = instance_id - if instance is not None: - request.instance = instance - - if clusters: - request.clusters.update(clusters) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.create_instance - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. 
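The "metadata header" the generated method attaches before sending the request is the ``x-goog-request-params`` routing header carrying the routed field (here ``parent``). A minimal sketch of what the ``gapic_v1.routing_header.to_grpc_metadata`` call used above produces, assuming google-api-core is installed and using a hypothetical resource name:

.. code-block:: python

    from google.api_core import gapic_v1

    # Build the ('x-goog-request-params', ...) metadata pair for a routed field,
    # mirroring the to_grpc_metadata call in the generated method above.
    params = (("parent", "projects/my-project"),)  # hypothetical resource name
    key, value = gapic_v1.routing_header.to_grpc_metadata(params)

    print(key)    # x-goog-request-params
    print(value)  # URL-encoded "parent=..." string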
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gba_instance.Instance, - metadata_type=bigtable_instance_admin.CreateInstanceMetadata, - ) - - # Done; return the response. - return response - - async def get_instance( - self, - request: Optional[ - Union[bigtable_instance_admin.GetInstanceRequest, dict] - ] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> instance.Instance: - r"""Gets information about an instance. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_get_instance(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.GetInstanceRequest( - name="name_value", - ) - - # Make the request - response = await client.get_instance(request=request) - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetInstanceRequest, dict]]): - The request object. Request message for - BigtableInstanceAdmin.GetInstance. - name (:class:`str`): - Required. The unique name of the requested instance. - Values are of the form - ``projects/{project}/instances/{instance}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.types.Instance: - A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and - the resources that serve them. All tables in an - instance are served from all - [Clusters][google.bigtable.admin.v2.Cluster] in the - instance. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [name] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. 
- if not isinstance(request, bigtable_instance_admin.GetInstanceRequest): - request = bigtable_instance_admin.GetInstanceRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.get_instance - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_instances( - self, - request: Optional[ - Union[bigtable_instance_admin.ListInstancesRequest, dict] - ] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> bigtable_instance_admin.ListInstancesResponse: - r"""Lists information about instances in a project. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_list_instances(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.ListInstancesRequest( - parent="parent_value", - ) - - # Make the request - response = await client.list_instances(request=request) - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListInstancesRequest, dict]]): - The request object. Request message for - BigtableInstanceAdmin.ListInstances. - parent (:class:`str`): - Required. The unique name of the project for which a - list of instances is requested. Values are of the form - ``projects/{project}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.types.ListInstancesResponse: - Response message for - BigtableInstanceAdmin.ListInstances. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
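As the quick checks above show, every generated method accepts either a single ``request`` (a proto message or an equivalent ``dict``) or the documented flattened fields, never both; mixing the two raises ``ValueError``. A minimal sketch of the two equivalent call styles on the synchronous client (resource names are hypothetical):

.. code-block:: python

    from google.cloud import bigtable_admin_v2

    client = bigtable_admin_v2.BigtableInstanceAdminClient()

    # Style 1: pass a fully formed request object (or an equivalent dict).
    instance = client.get_instance(
        request={"name": "projects/my-project/instances/my-instance"}
    )

    # Style 2: pass the documented flattened field instead of a request.
    instance = client.get_instance(
        name="projects/my-project/instances/my-instance"
    )

    # Passing both `request` and a flattened field raises ValueError.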
- flattened_params = [parent] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_instance_admin.ListInstancesRequest): - request = bigtable_instance_admin.ListInstancesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.list_instances - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_instance( - self, - request: Optional[Union[instance.Instance, dict]] = None, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> instance.Instance: - r"""Updates an instance within a project. This method - updates only the display name and type for an Instance. - To update other Instance properties, such as labels, use - PartialUpdateInstance. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_update_instance(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.Instance( - display_name="display_name_value", - ) - - # Make the request - response = await client.update_instance(request=request) - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.Instance, dict]]): - The request object. A collection of Bigtable - [Tables][google.bigtable.admin.v2.Table] and the - resources that serve them. All tables in an instance are - served from all - [Clusters][google.bigtable.admin.v2.Cluster] in the - instance. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. 
- - Returns: - google.cloud.bigtable_admin_v2.types.Instance: - A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and - the resources that serve them. All tables in an - instance are served from all - [Clusters][google.bigtable.admin.v2.Cluster] in the - instance. - - """ - # Create or coerce a protobuf request object. - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, instance.Instance): - request = instance.Instance(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.update_instance - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def partial_update_instance( - self, - request: Optional[ - Union[bigtable_instance_admin.PartialUpdateInstanceRequest, dict] - ] = None, - *, - instance: Optional[gba_instance.Instance] = None, - update_mask: Optional[field_mask_pb2.FieldMask] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operation_async.AsyncOperation: - r"""Partially updates an instance within a project. This - method can modify all fields of an Instance and is the - preferred way to update an Instance. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_partial_update_instance(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - instance = bigtable_admin_v2.Instance() - instance.display_name = "display_name_value" - - request = bigtable_admin_v2.PartialUpdateInstanceRequest( - instance=instance, - ) - - # Make the request - operation = client.partial_update_instance(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.PartialUpdateInstanceRequest, dict]]): - The request object. Request message for - BigtableInstanceAdmin.PartialUpdateInstance. - instance (:class:`google.cloud.bigtable_admin_v2.types.Instance`): - Required. The Instance which will - (partially) replace the current value. - - This corresponds to the ``instance`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. The subset of Instance - fields which should be replaced. Must be - explicitly set. 
- - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Instance` A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and - the resources that serve them. All tables in an - instance are served from all - [Clusters][google.bigtable.admin.v2.Cluster] in the - instance. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [instance, update_mask] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance( - request, bigtable_instance_admin.PartialUpdateInstanceRequest - ): - request = bigtable_instance_admin.PartialUpdateInstanceRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if instance is not None: - request.instance = instance - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.partial_update_instance - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("instance.name", request.instance.name),) - ), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gba_instance.Instance, - metadata_type=bigtable_instance_admin.UpdateInstanceMetadata, - ) - - # Done; return the response. - return response - - async def delete_instance( - self, - request: Optional[ - Union[bigtable_instance_admin.DeleteInstanceRequest, dict] - ] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> None: - r"""Delete an instance from a project. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. 
- # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_delete_instance(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.DeleteInstanceRequest( - name="name_value", - ) - - # Make the request - await client.delete_instance(request=request) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteInstanceRequest, dict]]): - The request object. Request message for - BigtableInstanceAdmin.DeleteInstance. - name (:class:`str`): - Required. The unique name of the instance to be deleted. - Values are of the form - ``projects/{project}/instances/{instance}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [name] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_instance_admin.DeleteInstanceRequest): - request = bigtable_instance_admin.DeleteInstanceRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.delete_instance - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - async def create_cluster( - self, - request: Optional[ - Union[bigtable_instance_admin.CreateClusterRequest, dict] - ] = None, - *, - parent: Optional[str] = None, - cluster_id: Optional[str] = None, - cluster: Optional[instance.Cluster] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operation_async.AsyncOperation: - r"""Creates a cluster within an instance. 
- - Note that exactly one of Cluster.serve_nodes and - Cluster.cluster_config.cluster_autoscaling_config can be set. If - serve_nodes is set to non-zero, then the cluster is manually - scaled. If cluster_config.cluster_autoscaling_config is - non-empty, then autoscaling is enabled. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_create_cluster(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.CreateClusterRequest( - parent="parent_value", - cluster_id="cluster_id_value", - ) - - # Make the request - operation = client.create_cluster(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateClusterRequest, dict]]): - The request object. Request message for - BigtableInstanceAdmin.CreateCluster. - parent (:class:`str`): - Required. The unique name of the instance in which to - create the new cluster. Values are of the form - ``projects/{project}/instances/{instance}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (:class:`str`): - Required. The ID to be used when referring to the new - cluster within its instance, e.g., just ``mycluster`` - rather than - ``projects/myproject/instances/myinstance/clusters/mycluster``. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster (:class:`google.cloud.bigtable_admin_v2.types.Cluster`): - Required. The cluster to be created. Fields marked - ``OutputOnly`` must be left blank. - - This corresponds to the ``cluster`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Cluster` A resizable group of nodes in a particular cloud location, capable - of serving all - [Tables][google.bigtable.admin.v2.Table] in the - parent [Instance][google.bigtable.admin.v2.Instance]. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- flattened_params = [parent, cluster_id, cluster] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_instance_admin.CreateClusterRequest): - request = bigtable_instance_admin.CreateClusterRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if cluster_id is not None: - request.cluster_id = cluster_id - if cluster is not None: - request.cluster = cluster - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.create_cluster - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - instance.Cluster, - metadata_type=bigtable_instance_admin.CreateClusterMetadata, - ) - - # Done; return the response. - return response - - async def get_cluster( - self, - request: Optional[ - Union[bigtable_instance_admin.GetClusterRequest, dict] - ] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> instance.Cluster: - r"""Gets information about a cluster. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_get_cluster(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.GetClusterRequest( - name="name_value", - ) - - # Make the request - response = await client.get_cluster(request=request) - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetClusterRequest, dict]]): - The request object. Request message for - BigtableInstanceAdmin.GetCluster. - name (:class:`str`): - Required. The unique name of the requested cluster. - Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. 
- timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.types.Cluster: - A resizable group of nodes in a particular cloud location, capable - of serving all - [Tables][google.bigtable.admin.v2.Table] in the - parent [Instance][google.bigtable.admin.v2.Instance]. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [name] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_instance_admin.GetClusterRequest): - request = bigtable_instance_admin.GetClusterRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.get_cluster - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_clusters( - self, - request: Optional[ - Union[bigtable_instance_admin.ListClustersRequest, dict] - ] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> bigtable_instance_admin.ListClustersResponse: - r"""Lists information about clusters in an instance. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_list_clusters(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.ListClustersRequest( - parent="parent_value", - ) - - # Make the request - response = await client.list_clusters(request=request) - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListClustersRequest, dict]]): - The request object. 
Request message for - BigtableInstanceAdmin.ListClusters. - parent (:class:`str`): - Required. The unique name of the instance for which a - list of clusters is requested. Values are of the form - ``projects/{project}/instances/{instance}``. Use - ``{instance} = '-'`` to list Clusters for all Instances - in a project, e.g., ``projects/myproject/instances/-``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.types.ListClustersResponse: - Response message for - BigtableInstanceAdmin.ListClusters. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [parent] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_instance_admin.ListClustersRequest): - request = bigtable_instance_admin.ListClustersRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.list_clusters - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_cluster( - self, - request: Optional[Union[instance.Cluster, dict]] = None, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operation_async.AsyncOperation: - r"""Updates a cluster within an instance. - - Note that UpdateCluster does not support updating - cluster_config.cluster_autoscaling_config. In order to update - it, you must use PartialUpdateCluster. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
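# --- Illustrative usage sketch (not part of the generated client removed above). ---
# Lists the clusters of every instance in a project by passing "-" as the instance
# ID, as the ListClusters docstring describes. The project ID is a placeholder.
from google.cloud import bigtable_admin_v2


async def list_all_clusters():
    client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()

    response = await client.list_clusters(
        parent="projects/my-project/instances/-"
    )
    for cluster in response.clusters:
        print(cluster.name, cluster.state)

    # Zones that could not be reached are reported separately on the response.
    if response.failed_locations:
        print("Unreachable locations:", response.failed_locations)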
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_update_cluster(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.Cluster( - ) - - # Make the request - operation = client.update_cluster(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.Cluster, dict]]): - The request object. A resizable group of nodes in a particular cloud - location, capable of serving all - [Tables][google.bigtable.admin.v2.Table] in the parent - [Instance][google.bigtable.admin.v2.Instance]. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Cluster` A resizable group of nodes in a particular cloud location, capable - of serving all - [Tables][google.bigtable.admin.v2.Table] in the - parent [Instance][google.bigtable.admin.v2.Instance]. - - """ - # Create or coerce a protobuf request object. - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, instance.Cluster): - request = instance.Cluster(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.update_cluster - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - instance.Cluster, - metadata_type=bigtable_instance_admin.UpdateClusterMetadata, - ) - - # Done; return the response. - return response - - async def partial_update_cluster( - self, - request: Optional[ - Union[bigtable_instance_admin.PartialUpdateClusterRequest, dict] - ] = None, - *, - cluster: Optional[instance.Cluster] = None, - update_mask: Optional[field_mask_pb2.FieldMask] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operation_async.AsyncOperation: - r"""Partially updates a cluster within a project. This method is the - preferred way to update a Cluster. 
- - To enable and update autoscaling, set - cluster_config.cluster_autoscaling_config. When autoscaling is - enabled, serve_nodes is treated as an OUTPUT_ONLY field, meaning - that updates to it are ignored. Note that an update cannot - simultaneously set serve_nodes to non-zero and - cluster_config.cluster_autoscaling_config to non-empty, and also - specify both in the update_mask. - - To disable autoscaling, clear - cluster_config.cluster_autoscaling_config, and explicitly set a - serve_node count via the update_mask. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_partial_update_cluster(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.PartialUpdateClusterRequest( - ) - - # Make the request - operation = client.partial_update_cluster(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.PartialUpdateClusterRequest, dict]]): - The request object. Request message for - BigtableInstanceAdmin.PartialUpdateCluster. - cluster (:class:`google.cloud.bigtable_admin_v2.types.Cluster`): - Required. The Cluster which contains the partial updates - to be applied, subject to the update_mask. - - This corresponds to the ``cluster`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. The subset of Cluster - fields which should be replaced. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Cluster` A resizable group of nodes in a particular cloud location, capable - of serving all - [Tables][google.bigtable.admin.v2.Table] in the - parent [Instance][google.bigtable.admin.v2.Instance]. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
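# --- Illustrative usage sketch (not part of the generated client removed above). ---
# Enables autoscaling on an existing cluster via PartialUpdateCluster with a
# FieldMask, following the docstring above. The resource name, autoscaling values,
# and the exact mask path are placeholders/assumptions for the example only.
from google.cloud import bigtable_admin_v2
from google.protobuf import field_mask_pb2


async def enable_autoscaling():
    client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()

    cluster = bigtable_admin_v2.Cluster(
        name="projects/my-project/instances/my-instance/clusters/my-cluster",
        cluster_config={
            "cluster_autoscaling_config": {
                "autoscaling_limits": {"min_serve_nodes": 1, "max_serve_nodes": 10},
                "autoscaling_targets": {"cpu_utilization_percent": 70},
            }
        },
    )
    update_mask = field_mask_pb2.FieldMask(
        paths=["cluster_config.cluster_autoscaling_config"]
    )

    operation = await client.partial_update_cluster(
        cluster=cluster, update_mask=update_mask
    )
    updated = await operation.result()
    print(updated.name)

    # To disable autoscaling instead, clear cluster_config.cluster_autoscaling_config
    # and explicitly set serve_nodes through the update_mask, per the docstring.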
- flattened_params = [cluster, update_mask] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_instance_admin.PartialUpdateClusterRequest): - request = bigtable_instance_admin.PartialUpdateClusterRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if cluster is not None: - request.cluster = cluster - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.partial_update_cluster - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("cluster.name", request.cluster.name),) - ), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - instance.Cluster, - metadata_type=bigtable_instance_admin.PartialUpdateClusterMetadata, - ) - - # Done; return the response. - return response - - async def delete_cluster( - self, - request: Optional[ - Union[bigtable_instance_admin.DeleteClusterRequest, dict] - ] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> None: - r"""Deletes a cluster from an instance. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_delete_cluster(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.DeleteClusterRequest( - name="name_value", - ) - - # Make the request - await client.delete_cluster(request=request) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteClusterRequest, dict]]): - The request object. Request message for - BigtableInstanceAdmin.DeleteCluster. - name (:class:`str`): - Required. The unique name of the cluster to be deleted. - Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [name] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_instance_admin.DeleteClusterRequest): - request = bigtable_instance_admin.DeleteClusterRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.delete_cluster - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - async def create_app_profile( - self, - request: Optional[ - Union[bigtable_instance_admin.CreateAppProfileRequest, dict] - ] = None, - *, - parent: Optional[str] = None, - app_profile_id: Optional[str] = None, - app_profile: Optional[instance.AppProfile] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> instance.AppProfile: - r"""Creates an app profile within an instance. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_create_app_profile(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - app_profile = bigtable_admin_v2.AppProfile() - app_profile.priority = "PRIORITY_HIGH" - - request = bigtable_admin_v2.CreateAppProfileRequest( - parent="parent_value", - app_profile_id="app_profile_id_value", - app_profile=app_profile, - ) - - # Make the request - response = await client.create_app_profile(request=request) - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateAppProfileRequest, dict]]): - The request object. Request message for - BigtableInstanceAdmin.CreateAppProfile. - parent (:class:`str`): - Required. 
The unique name of the instance in which to - create the new app profile. Values are of the form - ``projects/{project}/instances/{instance}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - app_profile_id (:class:`str`): - Required. The ID to be used when referring to the new - app profile within its instance, e.g., just - ``myprofile`` rather than - ``projects/myproject/instances/myinstance/appProfiles/myprofile``. - - This corresponds to the ``app_profile_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - app_profile (:class:`google.cloud.bigtable_admin_v2.types.AppProfile`): - Required. The app profile to be created. Fields marked - ``OutputOnly`` will be ignored. - - This corresponds to the ``app_profile`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.types.AppProfile: - A configuration object describing how - Cloud Bigtable should treat traffic from - a particular end user application. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [parent, app_profile_id, app_profile] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_instance_admin.CreateAppProfileRequest): - request = bigtable_instance_admin.CreateAppProfileRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if app_profile_id is not None: - request.app_profile_id = app_profile_id - if app_profile is not None: - request.app_profile = app_profile - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.create_app_profile - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def get_app_profile( - self, - request: Optional[ - Union[bigtable_instance_admin.GetAppProfileRequest, dict] - ] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> instance.AppProfile: - r"""Gets information about an app profile. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_get_app_profile(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.GetAppProfileRequest( - name="name_value", - ) - - # Make the request - response = await client.get_app_profile(request=request) - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetAppProfileRequest, dict]]): - The request object. Request message for - BigtableInstanceAdmin.GetAppProfile. - name (:class:`str`): - Required. The unique name of the requested app profile. - Values are of the form - ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.types.AppProfile: - A configuration object describing how - Cloud Bigtable should treat traffic from - a particular end user application. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [name] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_instance_admin.GetAppProfileRequest): - request = bigtable_instance_admin.GetAppProfileRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._client._transport._wrapped_methods[ - self._client._transport.get_app_profile - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_app_profiles( - self, - request: Optional[ - Union[bigtable_instance_admin.ListAppProfilesRequest, dict] - ] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> pagers.ListAppProfilesAsyncPager: - r"""Lists information about app profiles in an instance. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_list_app_profiles(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.ListAppProfilesRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_app_profiles(request=request) - - # Handle the response - async for response in page_result: - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListAppProfilesRequest, dict]]): - The request object. Request message for - BigtableInstanceAdmin.ListAppProfiles. - parent (:class:`str`): - Required. The unique name of the instance for which a - list of app profiles is requested. Values are of the - form ``projects/{project}/instances/{instance}``. Use - ``{instance} = '-'`` to list AppProfiles for all - Instances in a project, e.g., - ``projects/myproject/instances/-``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListAppProfilesAsyncPager: - Response message for - BigtableInstanceAdmin.ListAppProfiles. - Iterating over this object will yield - results and resolve additional pages - automatically. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
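# --- Illustrative usage sketch (not part of the generated client removed above). ---
# The async pager returned by list_app_profiles resolves additional pages
# transparently; individual pages can also be walked via `pages`. The instance
# name is a placeholder.
from google.cloud import bigtable_admin_v2


async def show_app_profiles():
    client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()

    # Flat iteration over every AppProfile, fetching pages as needed.
    pager = await client.list_app_profiles(
        parent="projects/my-project/instances/my-instance"
    )
    async for app_profile in pager:
        print(app_profile.name)

    # Or iterate page by page, e.g. to inspect each raw ListAppProfilesResponse.
    pager = await client.list_app_profiles(
        parent="projects/my-project/instances/my-instance"
    )
    async for page in pager.pages:
        print(len(page.app_profiles), "profiles in this page")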
- flattened_params = [parent] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_instance_admin.ListAppProfilesRequest): - request = bigtable_instance_admin.ListAppProfilesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.list_app_profiles - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListAppProfilesAsyncPager( - method=rpc, - request=request, - response=response, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_app_profile( - self, - request: Optional[ - Union[bigtable_instance_admin.UpdateAppProfileRequest, dict] - ] = None, - *, - app_profile: Optional[instance.AppProfile] = None, - update_mask: Optional[field_mask_pb2.FieldMask] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operation_async.AsyncOperation: - r"""Updates an app profile within an instance. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_update_app_profile(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - app_profile = bigtable_admin_v2.AppProfile() - app_profile.priority = "PRIORITY_HIGH" - - request = bigtable_admin_v2.UpdateAppProfileRequest( - app_profile=app_profile, - ) - - # Make the request - operation = client.update_app_profile(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.UpdateAppProfileRequest, dict]]): - The request object. Request message for - BigtableInstanceAdmin.UpdateAppProfile. - app_profile (:class:`google.cloud.bigtable_admin_v2.types.AppProfile`): - Required. The app profile which will - (partially) replace the current value. 
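# --- Illustrative usage sketch (not part of the generated client removed above). ---
# Partially updates an app profile's description with a FieldMask and waits on the
# long-running operation. The profile name and description are placeholders.
from google.cloud import bigtable_admin_v2
from google.protobuf import field_mask_pb2


async def update_profile_description():
    client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()

    app_profile = bigtable_admin_v2.AppProfile(
        name="projects/my-project/instances/my-instance/appProfiles/my-profile",
        description="Profile used by the reporting jobs",
    )
    # Only the fields named in the mask are replaced on the server.
    update_mask = field_mask_pb2.FieldMask(paths=["description"])

    operation = await client.update_app_profile(
        app_profile=app_profile, update_mask=update_mask
    )
    updated = await operation.result()
    print(updated.description)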
- - This corresponds to the ``app_profile`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. The subset of app profile - fields which should be replaced. If - unset, all fields will be replaced. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.AppProfile` A configuration object describing how Cloud Bigtable should treat traffic - from a particular end user application. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [app_profile, update_mask] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_instance_admin.UpdateAppProfileRequest): - request = bigtable_instance_admin.UpdateAppProfileRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if app_profile is not None: - request.app_profile = app_profile - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.update_app_profile - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("app_profile.name", request.app_profile.name),) - ), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - instance.AppProfile, - metadata_type=bigtable_instance_admin.UpdateAppProfileMetadata, - ) - - # Done; return the response. 
- return response - - async def delete_app_profile( - self, - request: Optional[ - Union[bigtable_instance_admin.DeleteAppProfileRequest, dict] - ] = None, - *, - name: Optional[str] = None, - ignore_warnings: Optional[bool] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> None: - r"""Deletes an app profile from an instance. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_delete_app_profile(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.DeleteAppProfileRequest( - name="name_value", - ignore_warnings=True, - ) - - # Make the request - await client.delete_app_profile(request=request) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteAppProfileRequest, dict]]): - The request object. Request message for - BigtableInstanceAdmin.DeleteAppProfile. - name (:class:`str`): - Required. The unique name of the app profile to be - deleted. Values are of the form - ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - ignore_warnings (:class:`bool`): - Required. If true, ignore safety - checks when deleting the app profile. - - This corresponds to the ``ignore_warnings`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [name, ignore_warnings] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_instance_admin.DeleteAppProfileRequest): - request = bigtable_instance_admin.DeleteAppProfileRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if name is not None: - request.name = name - if ignore_warnings is not None: - request.ignore_warnings = ignore_warnings - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.delete_app_profile - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - async def get_iam_policy( - self, - request: Optional[Union[iam_policy_pb2.GetIamPolicyRequest, dict]] = None, - *, - resource: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> policy_pb2.Policy: - r"""Gets the access control policy for an instance - resource. Returns an empty policy if an instance exists - but does not have a policy set. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - from google.iam.v1 import iam_policy_pb2 # type: ignore - - async def sample_get_iam_policy(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - request = iam_policy_pb2.GetIamPolicyRequest( - resource="resource_value", - ) - - # Make the request - response = await client.get_iam_policy(request=request) - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]]): - The request object. Request message for ``GetIamPolicy`` method. - resource (:class:`str`): - REQUIRED: The resource for which the - policy is being requested. See the - operation documentation for the - appropriate value for this field. - - This corresponds to the ``resource`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.iam.v1.policy_pb2.Policy: - An Identity and Access Management (IAM) policy, which specifies access - controls for Google Cloud resources. - - A Policy is a collection of bindings. A binding binds - one or more members, or principals, to a single role. - Principals can be user accounts, service accounts, - Google groups, and domains (such as G Suite). A role - is a named list of permissions; each role can be an - IAM predefined role or a user-created custom role. 
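# --- Illustrative usage sketch (not part of the generated client removed above). ---
# A simple read-modify-write of an instance IAM policy: fetch the current policy,
# append a binding, and send it back. The resource name, role, and member are
# placeholders; set_iam_policy takes the full request here since only `resource`
# is exposed as a flattened argument.
from google.cloud import bigtable_admin_v2
from google.iam.v1 import policy_pb2


async def grant_bigtable_user():
    client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
    resource = "projects/my-project/instances/my-instance"

    policy = await client.get_iam_policy(resource=resource)
    policy.bindings.append(
        policy_pb2.Binding(
            role="roles/bigtable.user",
            members=["user:analyst@example.com"],
        )
    )

    # The etag carried over from get_iam_policy guards against clobbering a
    # concurrent policy change.
    updated = await client.set_iam_policy(
        request={"resource": resource, "policy": policy}
    )
    print(updated.etag)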
- - For some types of Google Cloud resources, a binding - can also specify a condition, which is a logical - expression that allows access to a resource only if - the expression evaluates to true. A condition can add - constraints based on attributes of the request, the - resource, or both. To learn which resources support - conditions in their IAM policies, see the [IAM - documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). - - **JSON example:** - - :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` - - **YAML example:** - - :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` - - For a description of IAM and its features, see the - [IAM - documentation](\ https://cloud.google.com/iam/docs/). - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [resource] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. - if isinstance(request, dict): - request = iam_policy_pb2.GetIamPolicyRequest(**request) - elif not request: - request = iam_policy_pb2.GetIamPolicyRequest(resource=resource) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.get_iam_policy - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def set_iam_policy( - self, - request: Optional[Union[iam_policy_pb2.SetIamPolicyRequest, dict]] = None, - *, - resource: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> policy_pb2.Policy: - r"""Sets the access control policy on an instance - resource. Replaces any existing policy. - - .. 
code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - from google.iam.v1 import iam_policy_pb2 # type: ignore - - async def sample_set_iam_policy(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - request = iam_policy_pb2.SetIamPolicyRequest( - resource="resource_value", - ) - - # Make the request - response = await client.set_iam_policy(request=request) - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]]): - The request object. Request message for ``SetIamPolicy`` method. - resource (:class:`str`): - REQUIRED: The resource for which the - policy is being specified. See the - operation documentation for the - appropriate value for this field. - - This corresponds to the ``resource`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.iam.v1.policy_pb2.Policy: - An Identity and Access Management (IAM) policy, which specifies access - controls for Google Cloud resources. - - A Policy is a collection of bindings. A binding binds - one or more members, or principals, to a single role. - Principals can be user accounts, service accounts, - Google groups, and domains (such as G Suite). A role - is a named list of permissions; each role can be an - IAM predefined role or a user-created custom role. - - For some types of Google Cloud resources, a binding - can also specify a condition, which is a logical - expression that allows access to a resource only if - the expression evaluates to true. A condition can add - constraints based on attributes of the request, the - resource, or both. To learn which resources support - conditions in their IAM policies, see the [IAM - documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). 
- - **JSON example:** - - :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` - - **YAML example:** - - :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` - - For a description of IAM and its features, see the - [IAM - documentation](\ https://cloud.google.com/iam/docs/). - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [resource] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. - if isinstance(request, dict): - request = iam_policy_pb2.SetIamPolicyRequest(**request) - elif not request: - request = iam_policy_pb2.SetIamPolicyRequest(resource=resource) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.set_iam_policy - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def test_iam_permissions( - self, - request: Optional[Union[iam_policy_pb2.TestIamPermissionsRequest, dict]] = None, - *, - resource: Optional[str] = None, - permissions: Optional[MutableSequence[str]] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Returns permissions that the caller has on the - specified instance resource. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
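# --- Illustrative usage sketch (not part of the generated client removed above). ---
# Checks which of a set of permissions the caller actually holds on an instance
# before attempting an admin operation. The resource and permission strings are
# placeholders for the example.
from google.cloud import bigtable_admin_v2


async def check_caller_permissions():
    client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()

    response = await client.test_iam_permissions(
        resource="projects/my-project/instances/my-instance",
        permissions=[
            "bigtable.clusters.update",
            "bigtable.appProfiles.update",
        ],
    )
    # The response echoes back only the permissions the caller is granted.
    print(list(response.permissions))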
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - from google.iam.v1 import iam_policy_pb2 # type: ignore - - async def sample_test_iam_permissions(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - request = iam_policy_pb2.TestIamPermissionsRequest( - resource="resource_value", - permissions=['permissions_value1', 'permissions_value2'], - ) - - # Make the request - response = await client.test_iam_permissions(request=request) - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]]): - The request object. Request message for ``TestIamPermissions`` method. - resource (:class:`str`): - REQUIRED: The resource for which the - policy detail is being requested. See - the operation documentation for the - appropriate value for this field. - - This corresponds to the ``resource`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - permissions (:class:`MutableSequence[str]`): - The set of permissions to check for the ``resource``. - Permissions with wildcards (such as '*' or 'storage.*') - are not allowed. For more information see `IAM - Overview <https://cloud.google.com/iam/docs/overview#permissions>`__. - - This corresponds to the ``permissions`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse: - Response message for TestIamPermissions method. - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [resource, permissions] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. - if isinstance(request, dict): - request = iam_policy_pb2.TestIamPermissionsRequest(**request) - elif not request: - request = iam_policy_pb2.TestIamPermissionsRequest( - resource=resource, permissions=permissions - ) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.test_iam_permissions - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request.
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_hot_tablets( - self, - request: Optional[ - Union[bigtable_instance_admin.ListHotTabletsRequest, dict] - ] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> pagers.ListHotTabletsAsyncPager: - r"""Lists hot tablets in a cluster, within the time range - provided. Hot tablets are ordered based on CPU usage. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_list_hot_tablets(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.ListHotTabletsRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_hot_tablets(request=request) - - # Handle the response - async for response in page_result: - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListHotTabletsRequest, dict]]): - The request object. Request message for - BigtableInstanceAdmin.ListHotTablets. - parent (:class:`str`): - Required. The cluster name to list hot tablets. Value is - in the following form: - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListHotTabletsAsyncPager: - Response message for - BigtableInstanceAdmin.ListHotTablets. - Iterating over this object will yield - results and resolve additional pages - automatically. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [parent] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. 
- if not isinstance(request, bigtable_instance_admin.ListHotTabletsRequest): - request = bigtable_instance_admin.ListHotTabletsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.list_hot_tablets - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListHotTabletsAsyncPager( - method=rpc, - request=request, - response=response, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def create_logical_view( - self, - request: Optional[ - Union[bigtable_instance_admin.CreateLogicalViewRequest, dict] - ] = None, - *, - parent: Optional[str] = None, - logical_view: Optional[instance.LogicalView] = None, - logical_view_id: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operation_async.AsyncOperation: - r"""Creates a logical view within an instance. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_create_logical_view(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - logical_view = bigtable_admin_v2.LogicalView() - logical_view.query = "query_value" - - request = bigtable_admin_v2.CreateLogicalViewRequest( - parent="parent_value", - logical_view_id="logical_view_id_value", - logical_view=logical_view, - ) - - # Make the request - operation = client.create_logical_view(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateLogicalViewRequest, dict]]): - The request object. Request message for - BigtableInstanceAdmin.CreateLogicalView. - parent (:class:`str`): - Required. The parent instance where this logical view - will be created. Format: - ``projects/{project}/instances/{instance}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - logical_view (:class:`google.cloud.bigtable_admin_v2.types.LogicalView`): - Required. The logical view to create. 
- This corresponds to the ``logical_view`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - logical_view_id (:class:`str`): - Required. The ID to use for the - logical view, which will become the - final component of the logical view's - resource name. - - This corresponds to the ``logical_view_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.bigtable_admin_v2.types.LogicalView` - A SQL logical view object that can be referenced in SQL - queries. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [parent, logical_view, logical_view_id] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_instance_admin.CreateLogicalViewRequest): - request = bigtable_instance_admin.CreateLogicalViewRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if logical_view is not None: - request.logical_view = logical_view - if logical_view_id is not None: - request.logical_view_id = logical_view_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.create_logical_view - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - instance.LogicalView, - metadata_type=bigtable_instance_admin.CreateLogicalViewMetadata, - ) - - # Done; return the response. 
- return response - - async def get_logical_view( - self, - request: Optional[ - Union[bigtable_instance_admin.GetLogicalViewRequest, dict] - ] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> instance.LogicalView: - r"""Gets information about a logical view. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_get_logical_view(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.GetLogicalViewRequest( - name="name_value", - ) - - # Make the request - response = await client.get_logical_view(request=request) - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetLogicalViewRequest, dict]]): - The request object. Request message for - BigtableInstanceAdmin.GetLogicalView. - name (:class:`str`): - Required. The unique name of the requested logical view. - Values are of the form - ``projects/{project}/instances/{instance}/logicalViews/{logical_view}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.types.LogicalView: - A SQL logical view object that can be - referenced in SQL queries. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [name] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_instance_admin.GetLogicalViewRequest): - request = bigtable_instance_admin.GetLogicalViewRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.get_logical_view - ] - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_logical_views( - self, - request: Optional[ - Union[bigtable_instance_admin.ListLogicalViewsRequest, dict] - ] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> pagers.ListLogicalViewsAsyncPager: - r"""Lists information about logical views in an instance. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_list_logical_views(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.ListLogicalViewsRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_logical_views(request=request) - - # Handle the response - async for response in page_result: - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListLogicalViewsRequest, dict]]): - The request object. Request message for - BigtableInstanceAdmin.ListLogicalViews. - parent (:class:`str`): - Required. The unique name of the instance for which the - list of logical views is requested. Values are of the - form ``projects/{project}/instances/{instance}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListLogicalViewsAsyncPager: - Response message for - BigtableInstanceAdmin.ListLogicalViews. - Iterating over this object will yield - results and resolve additional pages - automatically. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [parent] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. 
- if not isinstance(request, bigtable_instance_admin.ListLogicalViewsRequest): - request = bigtable_instance_admin.ListLogicalViewsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.list_logical_views - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListLogicalViewsAsyncPager( - method=rpc, - request=request, - response=response, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_logical_view( - self, - request: Optional[ - Union[bigtable_instance_admin.UpdateLogicalViewRequest, dict] - ] = None, - *, - logical_view: Optional[instance.LogicalView] = None, - update_mask: Optional[field_mask_pb2.FieldMask] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operation_async.AsyncOperation: - r"""Updates a logical view within an instance. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_update_logical_view(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - logical_view = bigtable_admin_v2.LogicalView() - logical_view.query = "query_value" - - request = bigtable_admin_v2.UpdateLogicalViewRequest( - logical_view=logical_view, - ) - - # Make the request - operation = client.update_logical_view(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.UpdateLogicalViewRequest, dict]]): - The request object. Request message for - BigtableInstanceAdmin.UpdateLogicalView. - logical_view (:class:`google.cloud.bigtable_admin_v2.types.LogicalView`): - Required. The logical view to update. - - The logical view's ``name`` field is used to identify - the view to update. Format: - ``projects/{project}/instances/{instance}/logicalViews/{logical_view}``. - - This corresponds to the ``logical_view`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Optional. The list of fields to - update. 
- - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.bigtable_admin_v2.types.LogicalView` - A SQL logical view object that can be referenced in SQL - queries. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [logical_view, update_mask] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_instance_admin.UpdateLogicalViewRequest): - request = bigtable_instance_admin.UpdateLogicalViewRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if logical_view is not None: - request.logical_view = logical_view - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.update_logical_view - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("logical_view.name", request.logical_view.name),) - ), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - instance.LogicalView, - metadata_type=bigtable_instance_admin.UpdateLogicalViewMetadata, - ) - - # Done; return the response. - return response - - async def delete_logical_view( - self, - request: Optional[ - Union[bigtable_instance_admin.DeleteLogicalViewRequest, dict] - ] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> None: - r"""Deletes a logical view from an instance. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_delete_logical_view(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.DeleteLogicalViewRequest( - name="name_value", - ) - - # Make the request - await client.delete_logical_view(request=request) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteLogicalViewRequest, dict]]): - The request object. Request message for - BigtableInstanceAdmin.DeleteLogicalView. - name (:class:`str`): - Required. The unique name of the logical view to be - deleted. Format: - ``projects/{project}/instances/{instance}/logicalViews/{logical_view}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [name] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_instance_admin.DeleteLogicalViewRequest): - request = bigtable_instance_admin.DeleteLogicalViewRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.delete_logical_view - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - async def create_materialized_view( - self, - request: Optional[ - Union[bigtable_instance_admin.CreateMaterializedViewRequest, dict] - ] = None, - *, - parent: Optional[str] = None, - materialized_view: Optional[instance.MaterializedView] = None, - materialized_view_id: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operation_async.AsyncOperation: - r"""Creates a materialized view within an instance. - - .. 
code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_create_materialized_view(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - materialized_view = bigtable_admin_v2.MaterializedView() - materialized_view.query = "query_value" - - request = bigtable_admin_v2.CreateMaterializedViewRequest( - parent="parent_value", - materialized_view_id="materialized_view_id_value", - materialized_view=materialized_view, - ) - - # Make the request - operation = client.create_materialized_view(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateMaterializedViewRequest, dict]]): - The request object. Request message for - BigtableInstanceAdmin.CreateMaterializedView. - parent (:class:`str`): - Required. The parent instance where this materialized - view will be created. Format: - ``projects/{project}/instances/{instance}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - materialized_view (:class:`google.cloud.bigtable_admin_v2.types.MaterializedView`): - Required. The materialized view to - create. - - This corresponds to the ``materialized_view`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - materialized_view_id (:class:`str`): - Required. The ID to use for the - materialized view, which will become the - final component of the materialized - view's resource name. - - This corresponds to the ``materialized_view_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.bigtable_admin_v2.types.MaterializedView` - A materialized view object that can be referenced in SQL - queries. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [parent, materialized_view, materialized_view_id] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." 
- ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance( - request, bigtable_instance_admin.CreateMaterializedViewRequest - ): - request = bigtable_instance_admin.CreateMaterializedViewRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if materialized_view is not None: - request.materialized_view = materialized_view - if materialized_view_id is not None: - request.materialized_view_id = materialized_view_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.create_materialized_view - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - instance.MaterializedView, - metadata_type=bigtable_instance_admin.CreateMaterializedViewMetadata, - ) - - # Done; return the response. - return response - - async def get_materialized_view( - self, - request: Optional[ - Union[bigtable_instance_admin.GetMaterializedViewRequest, dict] - ] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> instance.MaterializedView: - r"""Gets information about a materialized view. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_get_materialized_view(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.GetMaterializedViewRequest( - name="name_value", - ) - - # Make the request - response = await client.get_materialized_view(request=request) - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetMaterializedViewRequest, dict]]): - The request object. Request message for - BigtableInstanceAdmin.GetMaterializedView. - name (:class:`str`): - Required. The unique name of the requested materialized - view. Values are of the form - ``projects/{project}/instances/{instance}/materializedViews/{materialized_view}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.types.MaterializedView: - A materialized view object that can - be referenced in SQL queries. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [name] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_instance_admin.GetMaterializedViewRequest): - request = bigtable_instance_admin.GetMaterializedViewRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.get_materialized_view - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_materialized_views( - self, - request: Optional[ - Union[bigtable_instance_admin.ListMaterializedViewsRequest, dict] - ] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> pagers.ListMaterializedViewsAsyncPager: - r"""Lists information about materialized views in an - instance. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_list_materialized_views(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.ListMaterializedViewsRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_materialized_views(request=request) - - # Handle the response - async for response in page_result: - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListMaterializedViewsRequest, dict]]): - The request object. Request message for - BigtableInstanceAdmin.ListMaterializedViews. 
- parent (:class:`str`): - Required. The unique name of the instance for which the - list of materialized views is requested. Values are of - the form ``projects/{project}/instances/{instance}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListMaterializedViewsAsyncPager: - Response message for - BigtableInstanceAdmin.ListMaterializedViews. - Iterating over this object will yield - results and resolve additional pages - automatically. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [parent] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance( - request, bigtable_instance_admin.ListMaterializedViewsRequest - ): - request = bigtable_instance_admin.ListMaterializedViewsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.list_materialized_views - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListMaterializedViewsAsyncPager( - method=rpc, - request=request, - response=response, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_materialized_view( - self, - request: Optional[ - Union[bigtable_instance_admin.UpdateMaterializedViewRequest, dict] - ] = None, - *, - materialized_view: Optional[instance.MaterializedView] = None, - update_mask: Optional[field_mask_pb2.FieldMask] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operation_async.AsyncOperation: - r"""Updates a materialized view within an instance. - - .. 
code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_update_materialized_view(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - materialized_view = bigtable_admin_v2.MaterializedView() - materialized_view.query = "query_value" - - request = bigtable_admin_v2.UpdateMaterializedViewRequest( - materialized_view=materialized_view, - ) - - # Make the request - operation = client.update_materialized_view(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.UpdateMaterializedViewRequest, dict]]): - The request object. Request message for - BigtableInstanceAdmin.UpdateMaterializedView. - materialized_view (:class:`google.cloud.bigtable_admin_v2.types.MaterializedView`): - Required. The materialized view to update. - - The materialized view's ``name`` field is used to - identify the view to update. Format: - ``projects/{project}/instances/{instance}/materializedViews/{materialized_view}``. - - This corresponds to the ``materialized_view`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Optional. The list of fields to - update. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.bigtable_admin_v2.types.MaterializedView` - A materialized view object that can be referenced in SQL - queries. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [materialized_view, update_mask] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. 
- if not isinstance( - request, bigtable_instance_admin.UpdateMaterializedViewRequest - ): - request = bigtable_instance_admin.UpdateMaterializedViewRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if materialized_view is not None: - request.materialized_view = materialized_view - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.update_materialized_view - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("materialized_view.name", request.materialized_view.name),) - ), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - instance.MaterializedView, - metadata_type=bigtable_instance_admin.UpdateMaterializedViewMetadata, - ) - - # Done; return the response. - return response - - async def delete_materialized_view( - self, - request: Optional[ - Union[bigtable_instance_admin.DeleteMaterializedViewRequest, dict] - ] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> None: - r"""Deletes a materialized view from an instance. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_delete_materialized_view(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.DeleteMaterializedViewRequest( - name="name_value", - ) - - # Make the request - await client.delete_materialized_view(request=request) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteMaterializedViewRequest, dict]]): - The request object. Request message for - BigtableInstanceAdmin.DeleteMaterializedView. - name (:class:`str`): - Required. The unique name of the materialized view to be - deleted. Format: - ``projects/{project}/instances/{instance}/materializedViews/{materialized_view}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. 
- """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [name] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance( - request, bigtable_instance_admin.DeleteMaterializedViewRequest - ): - request = bigtable_instance_admin.DeleteMaterializedViewRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.delete_materialized_view - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - async def __aenter__(self) -> "BigtableInstanceAdminAsyncClient": - return self - - async def __aexit__(self, exc_type, exc, tb): - await self.transport.close() - - -DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=package_version.__version__ -) - -if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER - DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ - - -__all__ = ("BigtableInstanceAdminAsyncClient",) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py deleted file mode 100644 index 84df01058..000000000 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ /dev/null @@ -1,4818 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -from http import HTTPStatus -import json -import logging as std_logging -import os -import re -from typing import ( - Dict, - Callable, - Mapping, - MutableMapping, - MutableSequence, - Optional, - Sequence, - Tuple, - Type, - Union, - cast, -) -import warnings - -from google.cloud.bigtable_admin_v2 import gapic_version as package_version - -from google.api_core import client_options as client_options_lib -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore -import google.protobuf - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object, None] # type: ignore - -try: - from google.api_core import client_logging # type: ignore - - CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER -except ImportError: # pragma: NO COVER - CLIENT_LOGGING_SUPPORTED = False - -_LOGGER = std_logging.getLogger(__name__) - -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import pagers -from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin -from google.cloud.bigtable_admin_v2.types import common -from google.cloud.bigtable_admin_v2.types import instance -from google.cloud.bigtable_admin_v2.types import instance as gba_instance -from google.iam.v1 import iam_policy_pb2 # type: ignore -from google.iam.v1 import policy_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import BigtableInstanceAdminTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import BigtableInstanceAdminGrpcTransport -from .transports.grpc_asyncio import BigtableInstanceAdminGrpcAsyncIOTransport -from .transports.rest import BigtableInstanceAdminRestTransport - - -class BigtableInstanceAdminClientMeta(type): - """Metaclass for the BigtableInstanceAdmin client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. - """ - - _transport_registry = ( - OrderedDict() - ) # type: Dict[str, Type[BigtableInstanceAdminTransport]] - _transport_registry["grpc"] = BigtableInstanceAdminGrpcTransport - _transport_registry["grpc_asyncio"] = BigtableInstanceAdminGrpcAsyncIOTransport - _transport_registry["rest"] = BigtableInstanceAdminRestTransport - - def get_transport_class( - cls, - label: Optional[str] = None, - ) -> Type[BigtableInstanceAdminTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). 
- return next(iter(cls._transport_registry.values())) - - -class BigtableInstanceAdminClient(metaclass=BigtableInstanceAdminClientMeta): - """Service for creating, configuring, and deleting Cloud - Bigtable Instances and Clusters. Provides access to the Instance - and Cluster schemas only, not the tables' metadata or data - stored in those tables. - """ - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" - ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. - DEFAULT_ENDPOINT = "bigtableadmin.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - _DEFAULT_ENDPOINT_TEMPLATE = "bigtableadmin.{UNIVERSE_DOMAIN}" - _DEFAULT_UNIVERSE = "googleapis.com" - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - BigtableInstanceAdminClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - BigtableInstanceAdminClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file(filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> BigtableInstanceAdminTransport: - """Returns the transport used by the client instance. - - Returns: - BigtableInstanceAdminTransport: The transport used by the client - instance. 
- """ - return self._transport - - @staticmethod - def app_profile_path( - project: str, - instance: str, - app_profile: str, - ) -> str: - """Returns a fully-qualified app_profile string.""" - return ( - "projects/{project}/instances/{instance}/appProfiles/{app_profile}".format( - project=project, - instance=instance, - app_profile=app_profile, - ) - ) - - @staticmethod - def parse_app_profile_path(path: str) -> Dict[str, str]: - """Parses a app_profile path into its component segments.""" - m = re.match( - r"^projects/(?P.+?)/instances/(?P.+?)/appProfiles/(?P.+?)$", - path, - ) - return m.groupdict() if m else {} - - @staticmethod - def cluster_path( - project: str, - instance: str, - cluster: str, - ) -> str: - """Returns a fully-qualified cluster string.""" - return "projects/{project}/instances/{instance}/clusters/{cluster}".format( - project=project, - instance=instance, - cluster=cluster, - ) - - @staticmethod - def parse_cluster_path(path: str) -> Dict[str, str]: - """Parses a cluster path into its component segments.""" - m = re.match( - r"^projects/(?P.+?)/instances/(?P.+?)/clusters/(?P.+?)$", - path, - ) - return m.groupdict() if m else {} - - @staticmethod - def crypto_key_path( - project: str, - location: str, - key_ring: str, - crypto_key: str, - ) -> str: - """Returns a fully-qualified crypto_key string.""" - return "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}".format( - project=project, - location=location, - key_ring=key_ring, - crypto_key=crypto_key, - ) - - @staticmethod - def parse_crypto_key_path(path: str) -> Dict[str, str]: - """Parses a crypto_key path into its component segments.""" - m = re.match( - r"^projects/(?P.+?)/locations/(?P.+?)/keyRings/(?P.+?)/cryptoKeys/(?P.+?)$", - path, - ) - return m.groupdict() if m else {} - - @staticmethod - def hot_tablet_path( - project: str, - instance: str, - cluster: str, - hot_tablet: str, - ) -> str: - """Returns a fully-qualified hot_tablet string.""" - return "projects/{project}/instances/{instance}/clusters/{cluster}/hotTablets/{hot_tablet}".format( - project=project, - instance=instance, - cluster=cluster, - hot_tablet=hot_tablet, - ) - - @staticmethod - def parse_hot_tablet_path(path: str) -> Dict[str, str]: - """Parses a hot_tablet path into its component segments.""" - m = re.match( - r"^projects/(?P.+?)/instances/(?P.+?)/clusters/(?P.+?)/hotTablets/(?P.+?)$", - path, - ) - return m.groupdict() if m else {} - - @staticmethod - def instance_path( - project: str, - instance: str, - ) -> str: - """Returns a fully-qualified instance string.""" - return "projects/{project}/instances/{instance}".format( - project=project, - instance=instance, - ) - - @staticmethod - def parse_instance_path(path: str) -> Dict[str, str]: - """Parses a instance path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/instances/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def logical_view_path( - project: str, - instance: str, - logical_view: str, - ) -> str: - """Returns a fully-qualified logical_view string.""" - return "projects/{project}/instances/{instance}/logicalViews/{logical_view}".format( - project=project, - instance=instance, - logical_view=logical_view, - ) - - @staticmethod - def parse_logical_view_path(path: str) -> Dict[str, str]: - """Parses a logical_view path into its component segments.""" - m = re.match( - r"^projects/(?P.+?)/instances/(?P.+?)/logicalViews/(?P.+?)$", - path, - ) - return m.groupdict() if m else {} - - @staticmethod - def 
materialized_view_path( - project: str, - instance: str, - materialized_view: str, - ) -> str: - """Returns a fully-qualified materialized_view string.""" - return "projects/{project}/instances/{instance}/materializedViews/{materialized_view}".format( - project=project, - instance=instance, - materialized_view=materialized_view, - ) - - @staticmethod - def parse_materialized_view_path(path: str) -> Dict[str, str]: - """Parses a materialized_view path into its component segments.""" - m = re.match( - r"^projects/(?P.+?)/instances/(?P.+?)/materializedViews/(?P.+?)$", - path, - ) - return m.groupdict() if m else {} - - @staticmethod - def table_path( - project: str, - instance: str, - table: str, - ) -> str: - """Returns a fully-qualified table string.""" - return "projects/{project}/instances/{instance}/tables/{table}".format( - project=project, - instance=instance, - table=table, - ) - - @staticmethod - def parse_table_path(path: str) -> Dict[str, str]: - """Parses a table path into its component segments.""" - m = re.match( - r"^projects/(?P.+?)/instances/(?P.+?)/tables/(?P.+?)$", - path, - ) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path( - billing_account: str, - ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format( - billing_account=billing_account, - ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str, str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path( - folder: str, - ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format( - folder=folder, - ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str, str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path( - organization: str, - ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format( - organization=organization, - ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str, str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path( - project: str, - ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format( - project=project, - ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str, str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path( - project: str, - location: str, - ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format( - project=project, - location=location, - ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str, str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @classmethod - def get_mtls_endpoint_and_cert_source( - cls, client_options: Optional[client_options_lib.ClientOptions] = None - ): - """Deprecated. 
Return the API endpoint and client cert source for mutual TLS. - - The client cert source is determined in the following order: - (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the - client cert source is None. - (2) if `client_options.client_cert_source` is provided, use the provided one; if the - default client cert source exists, use the default one; otherwise the client cert - source is None. - - The API endpoint is determined in the following order: - (1) if `client_options.api_endpoint` if provided, use the provided one. - (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the - default mTLS endpoint; if the environment variable is "never", use the default API - endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise - use the default API endpoint. - - More details can be found at https://google.aip.dev/auth/4114. - - Args: - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. Only the `api_endpoint` and `client_cert_source` properties may be used - in this method. - - Returns: - Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the - client cert source to use. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If any errors happen. - """ - - warnings.warn( - "get_mtls_endpoint_and_cert_source is deprecated. Use the api_endpoint property instead.", - DeprecationWarning, - ) - if client_options is None: - client_options = client_options_lib.ClientOptions() - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") - use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) - if use_mtls_endpoint not in ("auto", "never", "always"): - raise MutualTLSChannelError( - "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" - ) - - # Figure out the client cert source to use. - client_cert_source = None - if use_client_cert == "true": - if client_options.client_cert_source: - client_cert_source = client_options.client_cert_source - elif mtls.has_default_client_cert_source(): - client_cert_source = mtls.default_client_cert_source() - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - elif use_mtls_endpoint == "always" or ( - use_mtls_endpoint == "auto" and client_cert_source - ): - api_endpoint = cls.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = cls.DEFAULT_ENDPOINT - - return api_endpoint, client_cert_source - - @staticmethod - def _read_environment_variables(): - """Returns the environment variables used by the client. - - Returns: - Tuple[bool, str, str]: returns the GOOGLE_API_USE_CLIENT_CERTIFICATE, - GOOGLE_API_USE_MTLS_ENDPOINT, and GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variables. - - Raises: - ValueError: If GOOGLE_API_USE_CLIENT_CERTIFICATE is not - any of ["true", "false"]. - google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT - is not any of ["auto", "never", "always"]. 
- """ - use_client_cert = os.getenv( - "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" - ).lower() - use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() - universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) - if use_mtls_endpoint not in ("auto", "never", "always"): - raise MutualTLSChannelError( - "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" - ) - return use_client_cert == "true", use_mtls_endpoint, universe_domain_env - - @staticmethod - def _get_client_cert_source(provided_cert_source, use_cert_flag): - """Return the client cert source to be used by the client. - - Args: - provided_cert_source (bytes): The client certificate source provided. - use_cert_flag (bool): A flag indicating whether to use the client certificate. - - Returns: - bytes or None: The client cert source to be used by the client. - """ - client_cert_source = None - if use_cert_flag: - if provided_cert_source: - client_cert_source = provided_cert_source - elif mtls.has_default_client_cert_source(): - client_cert_source = mtls.default_client_cert_source() - return client_cert_source - - @staticmethod - def _get_api_endpoint( - api_override, client_cert_source, universe_domain, use_mtls_endpoint - ): - """Return the API endpoint used by the client. - - Args: - api_override (str): The API endpoint override. If specified, this is always - the return value of this function and the other arguments are not used. - client_cert_source (bytes): The client certificate source used by the client. - universe_domain (str): The universe domain used by the client. - use_mtls_endpoint (str): How to use the mTLS endpoint, which depends also on the other parameters. - Possible values are "always", "auto", or "never". - - Returns: - str: The API endpoint to be used by the client. - """ - if api_override is not None: - api_endpoint = api_override - elif use_mtls_endpoint == "always" or ( - use_mtls_endpoint == "auto" and client_cert_source - ): - _default_universe = BigtableInstanceAdminClient._DEFAULT_UNIVERSE - if universe_domain != _default_universe: - raise MutualTLSChannelError( - f"mTLS is not supported in any universe other than {_default_universe}." - ) - api_endpoint = BigtableInstanceAdminClient.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = ( - BigtableInstanceAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( - UNIVERSE_DOMAIN=universe_domain - ) - ) - return api_endpoint - - @staticmethod - def _get_universe_domain( - client_universe_domain: Optional[str], universe_domain_env: Optional[str] - ) -> str: - """Return the universe domain used by the client. - - Args: - client_universe_domain (Optional[str]): The universe domain configured via the client options. - universe_domain_env (Optional[str]): The universe domain configured via the "GOOGLE_CLOUD_UNIVERSE_DOMAIN" environment variable. - - Returns: - str: The universe domain to be used by the client. - - Raises: - ValueError: If the universe domain is an empty string. 
- """ - universe_domain = BigtableInstanceAdminClient._DEFAULT_UNIVERSE - if client_universe_domain is not None: - universe_domain = client_universe_domain - elif universe_domain_env is not None: - universe_domain = universe_domain_env - if len(universe_domain.strip()) == 0: - raise ValueError("Universe Domain cannot be an empty string.") - return universe_domain - - def _validate_universe_domain(self): - """Validates client's and credentials' universe domains are consistent. - - Returns: - bool: True iff the configured universe domain is valid. - - Raises: - ValueError: If the configured universe domain is not valid. - """ - - # NOTE (b/349488459): universe validation is disabled until further notice. - return True - - def _add_cred_info_for_auth_errors( - self, error: core_exceptions.GoogleAPICallError - ) -> None: - """Adds credential info string to error details for 401/403/404 errors. - - Args: - error (google.api_core.exceptions.GoogleAPICallError): The error to add the cred info. - """ - if error.code not in [ - HTTPStatus.UNAUTHORIZED, - HTTPStatus.FORBIDDEN, - HTTPStatus.NOT_FOUND, - ]: - return - - cred = self._transport._credentials - - # get_cred_info is only available in google-auth>=2.35.0 - if not hasattr(cred, "get_cred_info"): - return - - # ignore the type check since pypy test fails when get_cred_info - # is not available - cred_info = cred.get_cred_info() # type: ignore - if cred_info and hasattr(error._details, "append"): - error._details.append(json.dumps(cred_info)) - - @property - def api_endpoint(self): - """Return the API endpoint used by the client instance. - - Returns: - str: The API endpoint used by the client instance. - """ - return self._api_endpoint - - @property - def universe_domain(self) -> str: - """Return the universe domain used by the client instance. - - Returns: - str: The universe domain used by the client instance. - """ - return self._universe_domain - - def __init__( - self, - *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Optional[ - Union[ - str, - BigtableInstanceAdminTransport, - Callable[..., BigtableInstanceAdminTransport], - ] - ] = None, - client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the bigtable instance admin client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Optional[Union[str,BigtableInstanceAdminTransport,Callable[..., BigtableInstanceAdminTransport]]]): - The transport to use, or a Callable that constructs and returns a new transport. - If a Callable is given, it will be called with the same set of initialization - arguments as used in the BigtableInstanceAdminTransport constructor. - If set to None, a transport is chosen automatically. - client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): - Custom options for the client. - - 1. The ``api_endpoint`` property can be used to override the - default endpoint provided by the client when ``transport`` is - not explicitly provided. 
Only if this property is not set and - ``transport`` was not explicitly provided, the endpoint is - determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment - variable, which have one of the following values: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto-switch to the - default mTLS endpoint if client certificate is present; this is - the default value). - - 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide a client certificate for mTLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - 3. The ``universe_domain`` property can be used to override the - default "googleapis.com" universe. Note that the ``api_endpoint`` - property still takes precedence; and ``universe_domain`` is - currently not supported for mTLS. - - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client_options = client_options - if isinstance(self._client_options, dict): - self._client_options = client_options_lib.from_dict(self._client_options) - if self._client_options is None: - self._client_options = client_options_lib.ClientOptions() - self._client_options = cast( - client_options_lib.ClientOptions, self._client_options - ) - - universe_domain_opt = getattr(self._client_options, "universe_domain", None) - - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = BigtableInstanceAdminClient._read_environment_variables() - self._client_cert_source = BigtableInstanceAdminClient._get_client_cert_source( - self._client_options.client_cert_source, self._use_client_cert - ) - self._universe_domain = BigtableInstanceAdminClient._get_universe_domain( - universe_domain_opt, self._universe_domain_env - ) - self._api_endpoint = None # updated below, depending on `transport` - - # Initialize the universe domain validation. - self._is_universe_domain_valid = False - - if CLIENT_LOGGING_SUPPORTED: # pragma: NO COVER - # Setup logging. - client_logging.initialize_logging() - - api_key_value = getattr(self._client_options, "api_key", None) - if api_key_value and credentials: - raise ValueError( - "client_options.api_key and credentials are mutually exclusive" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - transport_provided = isinstance(transport, BigtableInstanceAdminTransport) - if transport_provided: - # transport is a BigtableInstanceAdminTransport instance. - if credentials or self._client_options.credentials_file or api_key_value: - raise ValueError( - "When providing a transport instance, " - "provide its credentials directly." - ) - if self._client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." 
- ) - self._transport = cast(BigtableInstanceAdminTransport, transport) - self._api_endpoint = self._transport.host - - self._api_endpoint = ( - self._api_endpoint - or BigtableInstanceAdminClient._get_api_endpoint( - self._client_options.api_endpoint, - self._client_cert_source, - self._universe_domain, - self._use_mtls_endpoint, - ) - ) - - if not transport_provided: - import google.auth._default # type: ignore - - if api_key_value and hasattr( - google.auth._default, "get_api_key_credentials" - ): - credentials = google.auth._default.get_api_key_credentials( - api_key_value - ) - - transport_init: Union[ - Type[BigtableInstanceAdminTransport], - Callable[..., BigtableInstanceAdminTransport], - ] = ( - BigtableInstanceAdminClient.get_transport_class(transport) - if isinstance(transport, str) or transport is None - else cast(Callable[..., BigtableInstanceAdminTransport], transport) - ) - # initialize with the provided callable or the passed in class - self._transport = transport_init( - credentials=credentials, - credentials_file=self._client_options.credentials_file, - host=self._api_endpoint, - scopes=self._client_options.scopes, - client_cert_source_for_mtls=self._client_cert_source, - quota_project_id=self._client_options.quota_project_id, - client_info=client_info, - always_use_jwt_access=True, - api_audience=self._client_options.api_audience, - ) - - if "async" not in str(self._transport): - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - std_logging.DEBUG - ): # pragma: NO COVER - _LOGGER.debug( - "Created client `google.bigtable.admin_v2.BigtableInstanceAdminClient`.", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "universeDomain": getattr( - self._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._transport, "_credentials") - else { - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "credentialsType": None, - }, - ) - - def create_instance( - self, - request: Optional[ - Union[bigtable_instance_admin.CreateInstanceRequest, dict] - ] = None, - *, - parent: Optional[str] = None, - instance_id: Optional[str] = None, - instance: Optional[gba_instance.Instance] = None, - clusters: Optional[MutableMapping[str, gba_instance.Cluster]] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operation.Operation: - r"""Create an instance within a project. - - Note that exactly one of Cluster.serve_nodes and - Cluster.cluster_config.cluster_autoscaling_config can be set. If - serve_nodes is set to non-zero, then the cluster is manually - scaled. If cluster_config.cluster_autoscaling_config is - non-empty, then autoscaling is enabled. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_create_instance(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - instance = bigtable_admin_v2.Instance() - instance.display_name = "display_name_value" - - request = bigtable_admin_v2.CreateInstanceRequest( - parent="parent_value", - instance_id="instance_id_value", - instance=instance, - ) - - # Make the request - operation = client.create_instance(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.CreateInstanceRequest, dict]): - The request object. Request message for - BigtableInstanceAdmin.CreateInstance. - parent (str): - Required. The unique name of the project in which to - create the new instance. Values are of the form - ``projects/{project}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - instance_id (str): - Required. The ID to be used when referring to the new - instance within its project, e.g., just ``myinstance`` - rather than ``projects/myproject/instances/myinstance``. - - This corresponds to the ``instance_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - instance (google.cloud.bigtable_admin_v2.types.Instance): - Required. The instance to create. Fields marked - ``OutputOnly`` must be left blank. - - This corresponds to the ``instance`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - clusters (MutableMapping[str, google.cloud.bigtable_admin_v2.types.Cluster]): - Required. The clusters to be created within the - instance, mapped by desired cluster ID, e.g., just - ``mycluster`` rather than - ``projects/myproject/instances/myinstance/clusters/mycluster``. - Fields marked ``OutputOnly`` must be left blank. - - This corresponds to the ``clusters`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Instance` A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and - the resources that serve them. All tables in an - instance are served from all - [Clusters][google.bigtable.admin.v2.Cluster] in the - instance. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- flattened_params = [parent, instance_id, instance, clusters] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_instance_admin.CreateInstanceRequest): - request = bigtable_instance_admin.CreateInstanceRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if instance_id is not None: - request.instance_id = instance_id - if instance is not None: - request.instance = instance - if clusters is not None: - request.clusters = clusters - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_instance] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - gba_instance.Instance, - metadata_type=bigtable_instance_admin.CreateInstanceMetadata, - ) - - # Done; return the response. - return response - - def get_instance( - self, - request: Optional[ - Union[bigtable_instance_admin.GetInstanceRequest, dict] - ] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> instance.Instance: - r"""Gets information about an instance. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_get_instance(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.GetInstanceRequest( - name="name_value", - ) - - # Make the request - response = client.get_instance(request=request) - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.GetInstanceRequest, dict]): - The request object. Request message for - BigtableInstanceAdmin.GetInstance. - name (str): - Required. The unique name of the requested instance. - Values are of the form - ``projects/{project}/instances/{instance}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. 
- timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.types.Instance: - A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and - the resources that serve them. All tables in an - instance are served from all - [Clusters][google.bigtable.admin.v2.Cluster] in the - instance. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [name] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_instance_admin.GetInstanceRequest): - request = bigtable_instance_admin.GetInstanceRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_instance] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_instances( - self, - request: Optional[ - Union[bigtable_instance_admin.ListInstancesRequest, dict] - ] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> bigtable_instance_admin.ListInstancesResponse: - r"""Lists information about instances in a project. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_list_instances(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.ListInstancesRequest( - parent="parent_value", - ) - - # Make the request - response = client.list_instances(request=request) - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.ListInstancesRequest, dict]): - The request object. Request message for - BigtableInstanceAdmin.ListInstances. 
- parent (str): - Required. The unique name of the project for which a - list of instances is requested. Values are of the form - ``projects/{project}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.types.ListInstancesResponse: - Response message for - BigtableInstanceAdmin.ListInstances. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [parent] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_instance_admin.ListInstancesRequest): - request = bigtable_instance_admin.ListInstancesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_instances] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_instance( - self, - request: Optional[Union[instance.Instance, dict]] = None, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> instance.Instance: - r"""Updates an instance within a project. This method - updates only the display name and type for an Instance. - To update other Instance properties, such as labels, use - PartialUpdateInstance. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_update_instance(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.Instance( - display_name="display_name_value", - ) - - # Make the request - response = client.update_instance(request=request) - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.Instance, dict]): - The request object. A collection of Bigtable - [Tables][google.bigtable.admin.v2.Table] and the - resources that serve them. All tables in an instance are - served from all - [Clusters][google.bigtable.admin.v2.Cluster] in the - instance. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.types.Instance: - A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and - the resources that serve them. All tables in an - instance are served from all - [Clusters][google.bigtable.admin.v2.Cluster] in the - instance. - - """ - # Create or coerce a protobuf request object. - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, instance.Instance): - request = instance.Instance(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_instance] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def partial_update_instance( - self, - request: Optional[ - Union[bigtable_instance_admin.PartialUpdateInstanceRequest, dict] - ] = None, - *, - instance: Optional[gba_instance.Instance] = None, - update_mask: Optional[field_mask_pb2.FieldMask] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operation.Operation: - r"""Partially updates an instance within a project. This - method can modify all fields of an Instance and is the - preferred way to update an Instance. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_partial_update_instance(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - instance = bigtable_admin_v2.Instance() - instance.display_name = "display_name_value" - - request = bigtable_admin_v2.PartialUpdateInstanceRequest( - instance=instance, - ) - - # Make the request - operation = client.partial_update_instance(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.PartialUpdateInstanceRequest, dict]): - The request object. Request message for - BigtableInstanceAdmin.PartialUpdateInstance. - instance (google.cloud.bigtable_admin_v2.types.Instance): - Required. The Instance which will - (partially) replace the current value. - - This corresponds to the ``instance`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The subset of Instance - fields which should be replaced. Must be - explicitly set. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Instance` A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and - the resources that serve them. All tables in an - instance are served from all - [Clusters][google.bigtable.admin.v2.Cluster] in the - instance. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [instance, update_mask] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance( - request, bigtable_instance_admin.PartialUpdateInstanceRequest - ): - request = bigtable_instance_admin.PartialUpdateInstanceRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if instance is not None: - request.instance = instance - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._transport._wrapped_methods[self._transport.partial_update_instance] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("instance.name", request.instance.name),) - ), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - gba_instance.Instance, - metadata_type=bigtable_instance_admin.UpdateInstanceMetadata, - ) - - # Done; return the response. - return response - - def delete_instance( - self, - request: Optional[ - Union[bigtable_instance_admin.DeleteInstanceRequest, dict] - ] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> None: - r"""Delete an instance from a project. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_delete_instance(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.DeleteInstanceRequest( - name="name_value", - ) - - # Make the request - client.delete_instance(request=request) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.DeleteInstanceRequest, dict]): - The request object. Request message for - BigtableInstanceAdmin.DeleteInstance. - name (str): - Required. The unique name of the instance to be deleted. - Values are of the form - ``projects/{project}/instances/{instance}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [name] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. 
- if not isinstance(request, bigtable_instance_admin.DeleteInstanceRequest): - request = bigtable_instance_admin.DeleteInstanceRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_instance] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def create_cluster( - self, - request: Optional[ - Union[bigtable_instance_admin.CreateClusterRequest, dict] - ] = None, - *, - parent: Optional[str] = None, - cluster_id: Optional[str] = None, - cluster: Optional[instance.Cluster] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operation.Operation: - r"""Creates a cluster within an instance. - - Note that exactly one of Cluster.serve_nodes and - Cluster.cluster_config.cluster_autoscaling_config can be set. If - serve_nodes is set to non-zero, then the cluster is manually - scaled. If cluster_config.cluster_autoscaling_config is - non-empty, then autoscaling is enabled. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_create_cluster(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.CreateClusterRequest( - parent="parent_value", - cluster_id="cluster_id_value", - ) - - # Make the request - operation = client.create_cluster(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.CreateClusterRequest, dict]): - The request object. Request message for - BigtableInstanceAdmin.CreateCluster. - parent (str): - Required. The unique name of the instance in which to - create the new cluster. Values are of the form - ``projects/{project}/instances/{instance}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (str): - Required. The ID to be used when referring to the new - cluster within its instance, e.g., just ``mycluster`` - rather than - ``projects/myproject/instances/myinstance/clusters/mycluster``. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster (google.cloud.bigtable_admin_v2.types.Cluster): - Required. The cluster to be created. Fields marked - ``OutputOnly`` must be left blank. 
- - This corresponds to the ``cluster`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Cluster` A resizable group of nodes in a particular cloud location, capable - of serving all - [Tables][google.bigtable.admin.v2.Table] in the - parent [Instance][google.bigtable.admin.v2.Instance]. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [parent, cluster_id, cluster] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_instance_admin.CreateClusterRequest): - request = bigtable_instance_admin.CreateClusterRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if cluster_id is not None: - request.cluster_id = cluster_id - if cluster is not None: - request.cluster = cluster - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_cluster] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - instance.Cluster, - metadata_type=bigtable_instance_admin.CreateClusterMetadata, - ) - - # Done; return the response. - return response - - def get_cluster( - self, - request: Optional[ - Union[bigtable_instance_admin.GetClusterRequest, dict] - ] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> instance.Cluster: - r"""Gets information about a cluster. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_get_cluster(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.GetClusterRequest( - name="name_value", - ) - - # Make the request - response = client.get_cluster(request=request) - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.GetClusterRequest, dict]): - The request object. Request message for - BigtableInstanceAdmin.GetCluster. - name (str): - Required. The unique name of the requested cluster. - Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.types.Cluster: - A resizable group of nodes in a particular cloud location, capable - of serving all - [Tables][google.bigtable.admin.v2.Table] in the - parent [Instance][google.bigtable.admin.v2.Instance]. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [name] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_instance_admin.GetClusterRequest): - request = bigtable_instance_admin.GetClusterRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_cluster] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def list_clusters( - self, - request: Optional[ - Union[bigtable_instance_admin.ListClustersRequest, dict] - ] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> bigtable_instance_admin.ListClustersResponse: - r"""Lists information about clusters in an instance. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_list_clusters(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.ListClustersRequest( - parent="parent_value", - ) - - # Make the request - response = client.list_clusters(request=request) - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.ListClustersRequest, dict]): - The request object. Request message for - BigtableInstanceAdmin.ListClusters. - parent (str): - Required. The unique name of the instance for which a - list of clusters is requested. Values are of the form - ``projects/{project}/instances/{instance}``. Use - ``{instance} = '-'`` to list Clusters for all Instances - in a project, e.g., ``projects/myproject/instances/-``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.types.ListClustersResponse: - Response message for - BigtableInstanceAdmin.ListClusters. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [parent] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_instance_admin.ListClustersRequest): - request = bigtable_instance_admin.ListClustersRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._transport._wrapped_methods[self._transport.list_clusters] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_cluster( - self, - request: Optional[Union[instance.Cluster, dict]] = None, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operation.Operation: - r"""Updates a cluster within an instance. - - Note that UpdateCluster does not support updating - cluster_config.cluster_autoscaling_config. In order to update - it, you must use PartialUpdateCluster. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_update_cluster(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.Cluster( - ) - - # Make the request - operation = client.update_cluster(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.Cluster, dict]): - The request object. A resizable group of nodes in a particular cloud - location, capable of serving all - [Tables][google.bigtable.admin.v2.Table] in the parent - [Instance][google.bigtable.admin.v2.Instance]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Cluster` A resizable group of nodes in a particular cloud location, capable - of serving all - [Tables][google.bigtable.admin.v2.Table] in the - parent [Instance][google.bigtable.admin.v2.Instance]. - - """ - # Create or coerce a protobuf request object. - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, instance.Cluster): - request = instance.Cluster(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_cluster] - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - instance.Cluster, - metadata_type=bigtable_instance_admin.UpdateClusterMetadata, - ) - - # Done; return the response. - return response - - def partial_update_cluster( - self, - request: Optional[ - Union[bigtable_instance_admin.PartialUpdateClusterRequest, dict] - ] = None, - *, - cluster: Optional[instance.Cluster] = None, - update_mask: Optional[field_mask_pb2.FieldMask] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operation.Operation: - r"""Partially updates a cluster within a project. This method is the - preferred way to update a Cluster. - - To enable and update autoscaling, set - cluster_config.cluster_autoscaling_config. When autoscaling is - enabled, serve_nodes is treated as an OUTPUT_ONLY field, meaning - that updates to it are ignored. Note that an update cannot - simultaneously set serve_nodes to non-zero and - cluster_config.cluster_autoscaling_config to non-empty, and also - specify both in the update_mask. - - To disable autoscaling, clear - cluster_config.cluster_autoscaling_config, and explicitly set a - serve_node count via the update_mask. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_partial_update_cluster(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.PartialUpdateClusterRequest( - ) - - # Make the request - operation = client.partial_update_cluster(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.PartialUpdateClusterRequest, dict]): - The request object. Request message for - BigtableInstanceAdmin.PartialUpdateCluster. - cluster (google.cloud.bigtable_admin_v2.types.Cluster): - Required. The Cluster which contains the partial updates - to be applied, subject to the update_mask. - - This corresponds to the ``cluster`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The subset of Cluster - fields which should be replaced. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Cluster` A resizable group of nodes in a particular cloud location, capable - of serving all - [Tables][google.bigtable.admin.v2.Table] in the - parent [Instance][google.bigtable.admin.v2.Instance]. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [cluster, update_mask] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_instance_admin.PartialUpdateClusterRequest): - request = bigtable_instance_admin.PartialUpdateClusterRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if cluster is not None: - request.cluster = cluster - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.partial_update_cluster] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("cluster.name", request.cluster.name),) - ), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - instance.Cluster, - metadata_type=bigtable_instance_admin.PartialUpdateClusterMetadata, - ) - - # Done; return the response. - return response - - def delete_cluster( - self, - request: Optional[ - Union[bigtable_instance_admin.DeleteClusterRequest, dict] - ] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> None: - r"""Deletes a cluster from an instance. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_delete_cluster(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.DeleteClusterRequest( - name="name_value", - ) - - # Make the request - client.delete_cluster(request=request) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.DeleteClusterRequest, dict]): - The request object. Request message for - BigtableInstanceAdmin.DeleteCluster. - name (str): - Required. The unique name of the cluster to be deleted. - Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [name] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_instance_admin.DeleteClusterRequest): - request = bigtable_instance_admin.DeleteClusterRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_cluster] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def create_app_profile( - self, - request: Optional[ - Union[bigtable_instance_admin.CreateAppProfileRequest, dict] - ] = None, - *, - parent: Optional[str] = None, - app_profile_id: Optional[str] = None, - app_profile: Optional[instance.AppProfile] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> instance.AppProfile: - r"""Creates an app profile within an instance. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. 
- # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_create_app_profile(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - app_profile = bigtable_admin_v2.AppProfile() - app_profile.priority = "PRIORITY_HIGH" - - request = bigtable_admin_v2.CreateAppProfileRequest( - parent="parent_value", - app_profile_id="app_profile_id_value", - app_profile=app_profile, - ) - - # Make the request - response = client.create_app_profile(request=request) - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.CreateAppProfileRequest, dict]): - The request object. Request message for - BigtableInstanceAdmin.CreateAppProfile. - parent (str): - Required. The unique name of the instance in which to - create the new app profile. Values are of the form - ``projects/{project}/instances/{instance}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - app_profile_id (str): - Required. The ID to be used when referring to the new - app profile within its instance, e.g., just - ``myprofile`` rather than - ``projects/myproject/instances/myinstance/appProfiles/myprofile``. - - This corresponds to the ``app_profile_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - app_profile (google.cloud.bigtable_admin_v2.types.AppProfile): - Required. The app profile to be created. Fields marked - ``OutputOnly`` will be ignored. - - This corresponds to the ``app_profile`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.types.AppProfile: - A configuration object describing how - Cloud Bigtable should treat traffic from - a particular end user application. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [parent, app_profile_id, app_profile] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_instance_admin.CreateAppProfileRequest): - request = bigtable_instance_admin.CreateAppProfileRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if parent is not None: - request.parent = parent - if app_profile_id is not None: - request.app_profile_id = app_profile_id - if app_profile is not None: - request.app_profile = app_profile - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_app_profile] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_app_profile( - self, - request: Optional[ - Union[bigtable_instance_admin.GetAppProfileRequest, dict] - ] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> instance.AppProfile: - r"""Gets information about an app profile. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_get_app_profile(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.GetAppProfileRequest( - name="name_value", - ) - - # Make the request - response = client.get_app_profile(request=request) - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.GetAppProfileRequest, dict]): - The request object. Request message for - BigtableInstanceAdmin.GetAppProfile. - name (str): - Required. The unique name of the requested app profile. - Values are of the form - ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.types.AppProfile: - A configuration object describing how - Cloud Bigtable should treat traffic from - a particular end user application. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- flattened_params = [name] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_instance_admin.GetAppProfileRequest): - request = bigtable_instance_admin.GetAppProfileRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_app_profile] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_app_profiles( - self, - request: Optional[ - Union[bigtable_instance_admin.ListAppProfilesRequest, dict] - ] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> pagers.ListAppProfilesPager: - r"""Lists information about app profiles in an instance. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_list_app_profiles(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.ListAppProfilesRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_app_profiles(request=request) - - # Handle the response - for response in page_result: - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.ListAppProfilesRequest, dict]): - The request object. Request message for - BigtableInstanceAdmin.ListAppProfiles. - parent (str): - Required. The unique name of the instance for which a - list of app profiles is requested. Values are of the - form ``projects/{project}/instances/{instance}``. Use - ``{instance} = '-'`` to list AppProfiles for all - Instances in a project, e.g., - ``projects/myproject/instances/-``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. 
Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListAppProfilesPager: - Response message for - BigtableInstanceAdmin.ListAppProfiles. - Iterating over this object will yield - results and resolve additional pages - automatically. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [parent] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_instance_admin.ListAppProfilesRequest): - request = bigtable_instance_admin.ListAppProfilesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_app_profiles] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListAppProfilesPager( - method=rpc, - request=request, - response=response, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_app_profile( - self, - request: Optional[ - Union[bigtable_instance_admin.UpdateAppProfileRequest, dict] - ] = None, - *, - app_profile: Optional[instance.AppProfile] = None, - update_mask: Optional[field_mask_pb2.FieldMask] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operation.Operation: - r"""Updates an app profile within an instance. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_update_app_profile(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - app_profile = bigtable_admin_v2.AppProfile() - app_profile.priority = "PRIORITY_HIGH" - - request = bigtable_admin_v2.UpdateAppProfileRequest( - app_profile=app_profile, - ) - - # Make the request - operation = client.update_app_profile(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.UpdateAppProfileRequest, dict]): - The request object. Request message for - BigtableInstanceAdmin.UpdateAppProfile. - app_profile (google.cloud.bigtable_admin_v2.types.AppProfile): - Required. The app profile which will - (partially) replace the current value. - - This corresponds to the ``app_profile`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The subset of app profile - fields which should be replaced. If - unset, all fields will be replaced. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.AppProfile` A configuration object describing how Cloud Bigtable should treat traffic - from a particular end user application. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [app_profile, update_mask] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_instance_admin.UpdateAppProfileRequest): - request = bigtable_instance_admin.UpdateAppProfileRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if app_profile is not None: - request.app_profile = app_profile - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._transport._wrapped_methods[self._transport.update_app_profile] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("app_profile.name", request.app_profile.name),) - ), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - instance.AppProfile, - metadata_type=bigtable_instance_admin.UpdateAppProfileMetadata, - ) - - # Done; return the response. - return response - - def delete_app_profile( - self, - request: Optional[ - Union[bigtable_instance_admin.DeleteAppProfileRequest, dict] - ] = None, - *, - name: Optional[str] = None, - ignore_warnings: Optional[bool] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> None: - r"""Deletes an app profile from an instance. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_delete_app_profile(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.DeleteAppProfileRequest( - name="name_value", - ignore_warnings=True, - ) - - # Make the request - client.delete_app_profile(request=request) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.DeleteAppProfileRequest, dict]): - The request object. Request message for - BigtableInstanceAdmin.DeleteAppProfile. - name (str): - Required. The unique name of the app profile to be - deleted. Values are of the form - ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - ignore_warnings (bool): - Required. If true, ignore safety - checks when deleting the app profile. - - This corresponds to the ``ignore_warnings`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- flattened_params = [name, ignore_warnings] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_instance_admin.DeleteAppProfileRequest): - request = bigtable_instance_admin.DeleteAppProfileRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if ignore_warnings is not None: - request.ignore_warnings = ignore_warnings - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_app_profile] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def get_iam_policy( - self, - request: Optional[Union[iam_policy_pb2.GetIamPolicyRequest, dict]] = None, - *, - resource: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> policy_pb2.Policy: - r"""Gets the access control policy for an instance - resource. Returns an empty policy if an instance exists - but does not have a policy set. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - from google.iam.v1 import iam_policy_pb2 # type: ignore - - def sample_get_iam_policy(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - request = iam_policy_pb2.GetIamPolicyRequest( - resource="resource_value", - ) - - # Make the request - response = client.get_iam_policy(request=request) - - # Handle the response - print(response) - - Args: - request (Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]): - The request object. Request message for ``GetIamPolicy`` method. - resource (str): - REQUIRED: The resource for which the - policy is being requested. See the - operation documentation for the - appropriate value for this field. - - This corresponds to the ``resource`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. 
Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.iam.v1.policy_pb2.Policy: - An Identity and Access Management (IAM) policy, which specifies access - controls for Google Cloud resources. - - A Policy is a collection of bindings. A binding binds - one or more members, or principals, to a single role. - Principals can be user accounts, service accounts, - Google groups, and domains (such as G Suite). A role - is a named list of permissions; each role can be an - IAM predefined role or a user-created custom role. - - For some types of Google Cloud resources, a binding - can also specify a condition, which is a logical - expression that allows access to a resource only if - the expression evaluates to true. A condition can add - constraints based on attributes of the request, the - resource, or both. To learn which resources support - conditions in their IAM policies, see the [IAM - documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). - - **JSON example:** - - :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` - - **YAML example:** - - :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` - - For a description of IAM and its features, see the - [IAM - documentation](\ https://cloud.google.com/iam/docs/). - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [resource] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - if isinstance(request, dict): - # - The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. - request = iam_policy_pb2.GetIamPolicyRequest(**request) - elif not request: - # Null request, just make one. - request = iam_policy_pb2.GetIamPolicyRequest() - if resource is not None: - request.resource = resource - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_iam_policy] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), - ) - - # Validate the universe domain. 
- self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def set_iam_policy( - self, - request: Optional[Union[iam_policy_pb2.SetIamPolicyRequest, dict]] = None, - *, - resource: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> policy_pb2.Policy: - r"""Sets the access control policy on an instance - resource. Replaces any existing policy. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - from google.iam.v1 import iam_policy_pb2 # type: ignore - - def sample_set_iam_policy(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - request = iam_policy_pb2.SetIamPolicyRequest( - resource="resource_value", - ) - - # Make the request - response = client.set_iam_policy(request=request) - - # Handle the response - print(response) - - Args: - request (Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]): - The request object. Request message for ``SetIamPolicy`` method. - resource (str): - REQUIRED: The resource for which the - policy is being specified. See the - operation documentation for the - appropriate value for this field. - - This corresponds to the ``resource`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.iam.v1.policy_pb2.Policy: - An Identity and Access Management (IAM) policy, which specifies access - controls for Google Cloud resources. - - A Policy is a collection of bindings. A binding binds - one or more members, or principals, to a single role. - Principals can be user accounts, service accounts, - Google groups, and domains (such as G Suite). A role - is a named list of permissions; each role can be an - IAM predefined role or a user-created custom role. - - For some types of Google Cloud resources, a binding - can also specify a condition, which is a logical - expression that allows access to a resource only if - the expression evaluates to true. A condition can add - constraints based on attributes of the request, the - resource, or both. To learn which resources support - conditions in their IAM policies, see the [IAM - documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). 
- - **JSON example:** - - :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` - - **YAML example:** - - :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` - - For a description of IAM and its features, see the - [IAM - documentation](\ https://cloud.google.com/iam/docs/). - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [resource] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - if isinstance(request, dict): - # - The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. - request = iam_policy_pb2.SetIamPolicyRequest(**request) - elif not request: - # Null request, just make one. - request = iam_policy_pb2.SetIamPolicyRequest() - if resource is not None: - request.resource = resource - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.set_iam_policy] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def test_iam_permissions( - self, - request: Optional[Union[iam_policy_pb2.TestIamPermissionsRequest, dict]] = None, - *, - resource: Optional[str] = None, - permissions: Optional[MutableSequence[str]] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Returns permissions that the caller has on the - specified instance resource. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
- # - It may require specifying regional endpoints when creating the service
- # client as shown in:
- # https://googleapis.dev/python/google-api-core/latest/client_options.html
- from google.cloud import bigtable_admin_v2
- from google.iam.v1 import iam_policy_pb2 # type: ignore
-
- def sample_test_iam_permissions():
- # Create a client
- client = bigtable_admin_v2.BigtableInstanceAdminClient()
-
- # Initialize request argument(s)
- request = iam_policy_pb2.TestIamPermissionsRequest(
- resource="resource_value",
- permissions=['permissions_value1', 'permissions_value2'],
- )
-
- # Make the request
- response = client.test_iam_permissions(request=request)
-
- # Handle the response
- print(response)
-
- Args:
- request (Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]):
- The request object. Request message for ``TestIamPermissions`` method.
- resource (str):
- REQUIRED: The resource for which the
- policy detail is being requested. See
- the operation documentation for the
- appropriate value for this field.
-
- This corresponds to the ``resource`` field
- on the ``request`` instance; if ``request`` is provided, this
- should not be set.
- permissions (MutableSequence[str]):
- The set of permissions to check for the ``resource``.
- Permissions with wildcards (such as '*' or 'storage.*')
- are not allowed. For more information see `IAM
- Overview <https://cloud.google.com/iam/docs/overview#permissions>`__.
-
- This corresponds to the ``permissions`` field
- on the ``request`` instance; if ``request`` is provided, this
- should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
- should be retried.
- timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
- sent along with the request as metadata. Normally, each value must be of type `str`,
- but for metadata keys ending with the suffix `-bin`, the corresponding values must
- be of type `bytes`.
-
- Returns:
- google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse:
- Response message for TestIamPermissions method.
- """
- # Create or coerce a protobuf request object.
- # - Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- flattened_params = [resource, permissions]
- has_flattened_params = (
- len([param for param in flattened_params if param is not None]) > 0
- )
- if request is not None and has_flattened_params:
- raise ValueError(
- "If the `request` argument is set, then none of "
- "the individual field arguments should be set."
- )
-
- if isinstance(request, dict):
- # - The request isn't a proto-plus wrapped type,
- # so it must be constructed via keyword expansion.
- request = iam_policy_pb2.TestIamPermissionsRequest(**request)
- elif not request:
- # Null request, just make one.
- request = iam_policy_pb2.TestIamPermissionsRequest()
- if resource is not None:
- request.resource = resource
- if permissions:
- request.permissions.extend(permissions)
-
- # Wrap the RPC method; this adds retry and timeout information,
- # and friendly error handling.
- rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions]
-
- # Certain fields should be provided within the metadata header;
- # add these here.
- metadata = tuple(metadata) + (
- gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
- )
-
- # Validate the universe domain.
- self._validate_universe_domain()
-
- # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_hot_tablets( - self, - request: Optional[ - Union[bigtable_instance_admin.ListHotTabletsRequest, dict] - ] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> pagers.ListHotTabletsPager: - r"""Lists hot tablets in a cluster, within the time range - provided. Hot tablets are ordered based on CPU usage. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_list_hot_tablets(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.ListHotTabletsRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_hot_tablets(request=request) - - # Handle the response - for response in page_result: - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.ListHotTabletsRequest, dict]): - The request object. Request message for - BigtableInstanceAdmin.ListHotTablets. - parent (str): - Required. The cluster name to list hot tablets. Value is - in the following form: - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListHotTabletsPager: - Response message for - BigtableInstanceAdmin.ListHotTablets. - Iterating over this object will yield - results and resolve additional pages - automatically. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [parent] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_instance_admin.ListHotTabletsRequest): - request = bigtable_instance_admin.ListHotTabletsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
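# A short sketch of ``list_hot_tablets`` using the flattened ``parent`` argument
# and the pager's ``__iter__`` convenience; the cluster path is an illustrative
# placeholder.
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableInstanceAdminClient()
hot_tablets = client.list_hot_tablets(
    parent="projects/my-project/instances/my-instance/clusters/my-cluster"  # placeholder
)
for hot_tablet in hot_tablets:  # the pager resolves additional pages automatically
    print(hot_tablet)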
- if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_hot_tablets] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListHotTabletsPager( - method=rpc, - request=request, - response=response, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def create_logical_view( - self, - request: Optional[ - Union[bigtable_instance_admin.CreateLogicalViewRequest, dict] - ] = None, - *, - parent: Optional[str] = None, - logical_view: Optional[instance.LogicalView] = None, - logical_view_id: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operation.Operation: - r"""Creates a logical view within an instance. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_create_logical_view(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - logical_view = bigtable_admin_v2.LogicalView() - logical_view.query = "query_value" - - request = bigtable_admin_v2.CreateLogicalViewRequest( - parent="parent_value", - logical_view_id="logical_view_id_value", - logical_view=logical_view, - ) - - # Make the request - operation = client.create_logical_view(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.CreateLogicalViewRequest, dict]): - The request object. Request message for - BigtableInstanceAdmin.CreateLogicalView. - parent (str): - Required. The parent instance where this logical view - will be created. Format: - ``projects/{project}/instances/{instance}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - logical_view (google.cloud.bigtable_admin_v2.types.LogicalView): - Required. The logical view to create. - This corresponds to the ``logical_view`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - logical_view_id (str): - Required. The ID to use for the - logical view, which will become the - final component of the logical view's - resource name. - - This corresponds to the ``logical_view_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.bigtable_admin_v2.types.LogicalView` - A SQL logical view object that can be referenced in SQL - queries. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [parent, logical_view, logical_view_id] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_instance_admin.CreateLogicalViewRequest): - request = bigtable_instance_admin.CreateLogicalViewRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if logical_view is not None: - request.logical_view = logical_view - if logical_view_id is not None: - request.logical_view_id = logical_view_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_logical_view] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - instance.LogicalView, - metadata_type=bigtable_instance_admin.CreateLogicalViewMetadata, - ) - - # Done; return the response. - return response - - def get_logical_view( - self, - request: Optional[ - Union[bigtable_instance_admin.GetLogicalViewRequest, dict] - ] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> instance.LogicalView: - r"""Gets information about a logical view. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
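# A sketch of ``create_logical_view`` with the flattened arguments instead of a
# request object, waiting synchronously on the returned long-running operation;
# the parent, view ID, and query text are illustrative placeholders.
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableInstanceAdminClient()
operation = client.create_logical_view(
    parent="projects/my-project/instances/my-instance",  # placeholder parent
    logical_view=bigtable_admin_v2.LogicalView(query="SELECT * FROM `my-table`"),  # placeholder query
    logical_view_id="my-logical-view",  # placeholder ID
)
logical_view = operation.result()  # blocks until the operation completes
print(logical_view.name)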
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_get_logical_view(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.GetLogicalViewRequest( - name="name_value", - ) - - # Make the request - response = client.get_logical_view(request=request) - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.GetLogicalViewRequest, dict]): - The request object. Request message for - BigtableInstanceAdmin.GetLogicalView. - name (str): - Required. The unique name of the requested logical view. - Values are of the form - ``projects/{project}/instances/{instance}/logicalViews/{logical_view}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.types.LogicalView: - A SQL logical view object that can be - referenced in SQL queries. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [name] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_instance_admin.GetLogicalViewRequest): - request = bigtable_instance_admin.GetLogicalViewRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_logical_view] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_logical_views( - self, - request: Optional[ - Union[bigtable_instance_admin.ListLogicalViewsRequest, dict] - ] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> pagers.ListLogicalViewsPager: - r"""Lists information about logical views in an instance. 
- - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_list_logical_views(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.ListLogicalViewsRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_logical_views(request=request) - - # Handle the response - for response in page_result: - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.ListLogicalViewsRequest, dict]): - The request object. Request message for - BigtableInstanceAdmin.ListLogicalViews. - parent (str): - Required. The unique name of the instance for which the - list of logical views is requested. Values are of the - form ``projects/{project}/instances/{instance}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListLogicalViewsPager: - Response message for - BigtableInstanceAdmin.ListLogicalViews. - Iterating over this object will yield - results and resolve additional pages - automatically. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [parent] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_instance_admin.ListLogicalViewsRequest): - request = bigtable_instance_admin.ListLogicalViewsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_logical_views] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. 
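# A sketch of page-wise iteration over ``list_logical_views``, assuming the
# returned ``ListLogicalViewsPager`` exposes a ``pages`` property and a
# ``logical_views`` response field analogous to the pagers defined later in this
# module; the instance path is an illustrative placeholder.
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableInstanceAdminClient()
pager = client.list_logical_views(
    parent="projects/my-project/instances/my-instance"  # placeholder parent
)
for page in pager.pages:  # one response message per page
    for logical_view in page.logical_views:
        print(logical_view.name)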
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListLogicalViewsPager( - method=rpc, - request=request, - response=response, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_logical_view( - self, - request: Optional[ - Union[bigtable_instance_admin.UpdateLogicalViewRequest, dict] - ] = None, - *, - logical_view: Optional[instance.LogicalView] = None, - update_mask: Optional[field_mask_pb2.FieldMask] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operation.Operation: - r"""Updates a logical view within an instance. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_update_logical_view(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - logical_view = bigtable_admin_v2.LogicalView() - logical_view.query = "query_value" - - request = bigtable_admin_v2.UpdateLogicalViewRequest( - logical_view=logical_view, - ) - - # Make the request - operation = client.update_logical_view(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.UpdateLogicalViewRequest, dict]): - The request object. Request message for - BigtableInstanceAdmin.UpdateLogicalView. - logical_view (google.cloud.bigtable_admin_v2.types.LogicalView): - Required. The logical view to update. - - The logical view's ``name`` field is used to identify - the view to update. Format: - ``projects/{project}/instances/{instance}/logicalViews/{logical_view}``. - - This corresponds to the ``logical_view`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Optional. The list of fields to - update. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.bigtable_admin_v2.types.LogicalView` - A SQL logical view object that can be referenced in SQL - queries. - - """ - # Create or coerce a protobuf request object. 
- # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [logical_view, update_mask] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_instance_admin.UpdateLogicalViewRequest): - request = bigtable_instance_admin.UpdateLogicalViewRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if logical_view is not None: - request.logical_view = logical_view - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_logical_view] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("logical_view.name", request.logical_view.name),) - ), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - instance.LogicalView, - metadata_type=bigtable_instance_admin.UpdateLogicalViewMetadata, - ) - - # Done; return the response. - return response - - def delete_logical_view( - self, - request: Optional[ - Union[bigtable_instance_admin.DeleteLogicalViewRequest, dict] - ] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> None: - r"""Deletes a logical view from an instance. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_delete_logical_view(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.DeleteLogicalViewRequest( - name="name_value", - ) - - # Make the request - client.delete_logical_view(request=request) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.DeleteLogicalViewRequest, dict]): - The request object. Request message for - BigtableInstanceAdmin.DeleteLogicalView. - name (str): - Required. The unique name of the logical view to be - deleted. Format: - ``projects/{project}/instances/{instance}/logicalViews/{logical_view}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
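# A sketch of ``update_logical_view`` with an explicit ``FieldMask`` limiting the
# update to the ``query`` field; the view name and query text are illustrative
# placeholders.
from google.cloud import bigtable_admin_v2
from google.protobuf import field_mask_pb2

client = bigtable_admin_v2.BigtableInstanceAdminClient()
logical_view = bigtable_admin_v2.LogicalView(
    name="projects/my-project/instances/my-instance/logicalViews/my-logical-view",  # placeholder name
    query="SELECT * FROM `my-table`",  # placeholder query
)
operation = client.update_logical_view(
    logical_view=logical_view,
    update_mask=field_mask_pb2.FieldMask(paths=["query"]),
)
print(operation.result().name)  # waits for the update to complete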
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [name] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_instance_admin.DeleteLogicalViewRequest): - request = bigtable_instance_admin.DeleteLogicalViewRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_logical_view] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def create_materialized_view( - self, - request: Optional[ - Union[bigtable_instance_admin.CreateMaterializedViewRequest, dict] - ] = None, - *, - parent: Optional[str] = None, - materialized_view: Optional[instance.MaterializedView] = None, - materialized_view_id: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operation.Operation: - r"""Creates a materialized view within an instance. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_create_materialized_view(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - materialized_view = bigtable_admin_v2.MaterializedView() - materialized_view.query = "query_value" - - request = bigtable_admin_v2.CreateMaterializedViewRequest( - parent="parent_value", - materialized_view_id="materialized_view_id_value", - materialized_view=materialized_view, - ) - - # Make the request - operation = client.create_materialized_view(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.CreateMaterializedViewRequest, dict]): - The request object. Request message for - BigtableInstanceAdmin.CreateMaterializedView. - parent (str): - Required. The parent instance where this materialized - view will be created. Format: - ``projects/{project}/instances/{instance}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - materialized_view (google.cloud.bigtable_admin_v2.types.MaterializedView): - Required. The materialized view to - create. - - This corresponds to the ``materialized_view`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - materialized_view_id (str): - Required. The ID to use for the - materialized view, which will become the - final component of the materialized - view's resource name. - - This corresponds to the ``materialized_view_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.bigtable_admin_v2.types.MaterializedView` - A materialized view object that can be referenced in SQL - queries. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [parent, materialized_view, materialized_view_id] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. 
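# A sketch of the flattened form of ``create_materialized_view``; the parent,
# view ID, and query text are illustrative placeholders.
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableInstanceAdminClient()
operation = client.create_materialized_view(
    parent="projects/my-project/instances/my-instance",  # placeholder parent
    materialized_view=bigtable_admin_v2.MaterializedView(
        query="SELECT * FROM `my-table`"  # placeholder query
    ),
    materialized_view_id="my-materialized-view",  # placeholder ID
)
print(operation.result().name)  # waits for the long-running operation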
- if not isinstance( - request, bigtable_instance_admin.CreateMaterializedViewRequest - ): - request = bigtable_instance_admin.CreateMaterializedViewRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if materialized_view is not None: - request.materialized_view = materialized_view - if materialized_view_id is not None: - request.materialized_view_id = materialized_view_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_materialized_view] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - instance.MaterializedView, - metadata_type=bigtable_instance_admin.CreateMaterializedViewMetadata, - ) - - # Done; return the response. - return response - - def get_materialized_view( - self, - request: Optional[ - Union[bigtable_instance_admin.GetMaterializedViewRequest, dict] - ] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> instance.MaterializedView: - r"""Gets information about a materialized view. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_get_materialized_view(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.GetMaterializedViewRequest( - name="name_value", - ) - - # Make the request - response = client.get_materialized_view(request=request) - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.GetMaterializedViewRequest, dict]): - The request object. Request message for - BigtableInstanceAdmin.GetMaterializedView. - name (str): - Required. The unique name of the requested materialized - view. Values are of the form - ``projects/{project}/instances/{instance}/materializedViews/{materialized_view}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. 
- - Returns: - google.cloud.bigtable_admin_v2.types.MaterializedView: - A materialized view object that can - be referenced in SQL queries. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [name] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_instance_admin.GetMaterializedViewRequest): - request = bigtable_instance_admin.GetMaterializedViewRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_materialized_view] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_materialized_views( - self, - request: Optional[ - Union[bigtable_instance_admin.ListMaterializedViewsRequest, dict] - ] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> pagers.ListMaterializedViewsPager: - r"""Lists information about materialized views in an - instance. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_list_materialized_views(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.ListMaterializedViewsRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_materialized_views(request=request) - - # Handle the response - for response in page_result: - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.ListMaterializedViewsRequest, dict]): - The request object. Request message for - BigtableInstanceAdmin.ListMaterializedViews. - parent (str): - Required. The unique name of the instance for which the - list of materialized views is requested. Values are of - the form ``projects/{project}/instances/{instance}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
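# A sketch of ``get_materialized_view`` with the flattened ``name`` argument; the
# resource name is an illustrative placeholder.
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableInstanceAdminClient()
view = client.get_materialized_view(
    name="projects/my-project/instances/my-instance/materializedViews/my-view"  # placeholder
)
print(view.query)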
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListMaterializedViewsPager: - Response message for - BigtableInstanceAdmin.ListMaterializedViews. - Iterating over this object will yield - results and resolve additional pages - automatically. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [parent] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance( - request, bigtable_instance_admin.ListMaterializedViewsRequest - ): - request = bigtable_instance_admin.ListMaterializedViewsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_materialized_views] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListMaterializedViewsPager( - method=rpc, - request=request, - response=response, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_materialized_view( - self, - request: Optional[ - Union[bigtable_instance_admin.UpdateMaterializedViewRequest, dict] - ] = None, - *, - materialized_view: Optional[instance.MaterializedView] = None, - update_mask: Optional[field_mask_pb2.FieldMask] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operation.Operation: - r"""Updates a materialized view within an instance. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_update_materialized_view(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - materialized_view = bigtable_admin_v2.MaterializedView() - materialized_view.query = "query_value" - - request = bigtable_admin_v2.UpdateMaterializedViewRequest( - materialized_view=materialized_view, - ) - - # Make the request - operation = client.update_materialized_view(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.UpdateMaterializedViewRequest, dict]): - The request object. Request message for - BigtableInstanceAdmin.UpdateMaterializedView. - materialized_view (google.cloud.bigtable_admin_v2.types.MaterializedView): - Required. The materialized view to update. - - The materialized view's ``name`` field is used to - identify the view to update. Format: - ``projects/{project}/instances/{instance}/materializedViews/{materialized_view}``. - - This corresponds to the ``materialized_view`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Optional. The list of fields to - update. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.bigtable_admin_v2.types.MaterializedView` - A materialized view object that can be referenced in SQL - queries. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [materialized_view, update_mask] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance( - request, bigtable_instance_admin.UpdateMaterializedViewRequest - ): - request = bigtable_instance_admin.UpdateMaterializedViewRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if materialized_view is not None: - request.materialized_view = materialized_view - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._transport._wrapped_methods[self._transport.update_materialized_view] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("materialized_view.name", request.materialized_view.name),) - ), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - instance.MaterializedView, - metadata_type=bigtable_instance_admin.UpdateMaterializedViewMetadata, - ) - - # Done; return the response. - return response - - def delete_materialized_view( - self, - request: Optional[ - Union[bigtable_instance_admin.DeleteMaterializedViewRequest, dict] - ] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> None: - r"""Deletes a materialized view from an instance. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_delete_materialized_view(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.DeleteMaterializedViewRequest( - name="name_value", - ) - - # Make the request - client.delete_materialized_view(request=request) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.DeleteMaterializedViewRequest, dict]): - The request object. Request message for - BigtableInstanceAdmin.DeleteMaterializedView. - name (str): - Required. The unique name of the materialized view to be - deleted. Format: - ``projects/{project}/instances/{instance}/materializedViews/{materialized_view}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [name] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. 
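# A sketch of ``delete_materialized_view`` with the flattened ``name`` argument;
# the method returns ``None`` on success, and the resource name is an
# illustrative placeholder.
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableInstanceAdminClient()
client.delete_materialized_view(
    name="projects/my-project/instances/my-instance/materializedViews/my-view"  # placeholder
)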
- if not isinstance( - request, bigtable_instance_admin.DeleteMaterializedViewRequest - ): - request = bigtable_instance_admin.DeleteMaterializedViewRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_materialized_view] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def __enter__(self) -> "BigtableInstanceAdminClient": - return self - - def __exit__(self, type, value, traceback): - """Releases underlying transport's resources. - - .. warning:: - ONLY use as a context manager if the transport is NOT shared - with other clients! Exiting the with block will CLOSE the transport - and may cause errors in other clients! - """ - self.transport.close() - - -DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=package_version.__version__ -) - -if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER - DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ - -__all__ = ("BigtableInstanceAdminClient",) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py deleted file mode 100644 index ce5b67b27..000000000 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py +++ /dev/null @@ -1,681 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.api_core import retry_async as retries_async -from typing import ( - Any, - AsyncIterator, - Awaitable, - Callable, - Sequence, - Tuple, - Optional, - Iterator, - Union, -) - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] - OptionalAsyncRetry = Union[ - retries_async.AsyncRetry, gapic_v1.method._MethodDefault, None - ] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object, None] # type: ignore - OptionalAsyncRetry = Union[retries_async.AsyncRetry, object, None] # type: ignore - -from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin -from google.cloud.bigtable_admin_v2.types import instance - - -class ListAppProfilesPager: - """A pager for iterating through ``list_app_profiles`` requests. 
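# A sketch of the context-manager usage enabled by ``__enter__``/``__exit__``
# above: the transport is closed when the block exits, so this form is only
# appropriate when the transport is not shared with other clients. The view name
# is an illustrative placeholder.
from google.cloud import bigtable_admin_v2

with bigtable_admin_v2.BigtableInstanceAdminClient() as client:
    client.get_logical_view(
        name="projects/my-project/instances/my-instance/logicalViews/my-logical-view"  # placeholder
    )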
- - This class thinly wraps an initial - :class:`google.cloud.bigtable_admin_v2.types.ListAppProfilesResponse` object, and - provides an ``__iter__`` method to iterate through its - ``app_profiles`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListAppProfiles`` requests and continue to iterate - through the ``app_profiles`` field on the - corresponding responses. - - All the usual :class:`google.cloud.bigtable_admin_v2.types.ListAppProfilesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - - def __init__( - self, - method: Callable[..., bigtable_instance_admin.ListAppProfilesResponse], - request: bigtable_instance_admin.ListAppProfilesRequest, - response: bigtable_instance_admin.ListAppProfilesResponse, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = () - ): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.bigtable_admin_v2.types.ListAppProfilesRequest): - The initial request object. - response (google.cloud.bigtable_admin_v2.types.ListAppProfilesResponse): - The initial response object. - retry (google.api_core.retry.Retry): Designation of what errors, - if any, should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - """ - self._method = method - self._request = bigtable_instance_admin.ListAppProfilesRequest(request) - self._response = response - self._retry = retry - self._timeout = timeout - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[bigtable_instance_admin.ListAppProfilesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method( - self._request, - retry=self._retry, - timeout=self._timeout, - metadata=self._metadata, - ) - yield self._response - - def __iter__(self) -> Iterator[instance.AppProfile]: - for page in self.pages: - yield from page.app_profiles - - def __repr__(self) -> str: - return "{0}<{1!r}>".format(self.__class__.__name__, self._response) - - -class ListAppProfilesAsyncPager: - """A pager for iterating through ``list_app_profiles`` requests. - - This class thinly wraps an initial - :class:`google.cloud.bigtable_admin_v2.types.ListAppProfilesResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``app_profiles`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListAppProfiles`` requests and continue to iterate - through the ``app_profiles`` field on the - corresponding responses. - - All the usual :class:`google.cloud.bigtable_admin_v2.types.ListAppProfilesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. 
- """ - - def __init__( - self, - method: Callable[ - ..., Awaitable[bigtable_instance_admin.ListAppProfilesResponse] - ], - request: bigtable_instance_admin.ListAppProfilesRequest, - response: bigtable_instance_admin.ListAppProfilesResponse, - *, - retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = () - ): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.bigtable_admin_v2.types.ListAppProfilesRequest): - The initial request object. - response (google.cloud.bigtable_admin_v2.types.ListAppProfilesResponse): - The initial response object. - retry (google.api_core.retry.AsyncRetry): Designation of what errors, - if any, should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - """ - self._method = method - self._request = bigtable_instance_admin.ListAppProfilesRequest(request) - self._response = response - self._retry = retry - self._timeout = timeout - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages( - self, - ) -> AsyncIterator[bigtable_instance_admin.ListAppProfilesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method( - self._request, - retry=self._retry, - timeout=self._timeout, - metadata=self._metadata, - ) - yield self._response - - def __aiter__(self) -> AsyncIterator[instance.AppProfile]: - async def async_generator(): - async for page in self.pages: - for response in page.app_profiles: - yield response - - return async_generator() - - def __repr__(self) -> str: - return "{0}<{1!r}>".format(self.__class__.__name__, self._response) - - -class ListHotTabletsPager: - """A pager for iterating through ``list_hot_tablets`` requests. - - This class thinly wraps an initial - :class:`google.cloud.bigtable_admin_v2.types.ListHotTabletsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``hot_tablets`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListHotTablets`` requests and continue to iterate - through the ``hot_tablets`` field on the - corresponding responses. - - All the usual :class:`google.cloud.bigtable_admin_v2.types.ListHotTabletsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - - def __init__( - self, - method: Callable[..., bigtable_instance_admin.ListHotTabletsResponse], - request: bigtable_instance_admin.ListHotTabletsRequest, - response: bigtable_instance_admin.ListHotTabletsResponse, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = () - ): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. 
- request (google.cloud.bigtable_admin_v2.types.ListHotTabletsRequest): - The initial request object. - response (google.cloud.bigtable_admin_v2.types.ListHotTabletsResponse): - The initial response object. - retry (google.api_core.retry.Retry): Designation of what errors, - if any, should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - """ - self._method = method - self._request = bigtable_instance_admin.ListHotTabletsRequest(request) - self._response = response - self._retry = retry - self._timeout = timeout - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[bigtable_instance_admin.ListHotTabletsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method( - self._request, - retry=self._retry, - timeout=self._timeout, - metadata=self._metadata, - ) - yield self._response - - def __iter__(self) -> Iterator[instance.HotTablet]: - for page in self.pages: - yield from page.hot_tablets - - def __repr__(self) -> str: - return "{0}<{1!r}>".format(self.__class__.__name__, self._response) - - -class ListHotTabletsAsyncPager: - """A pager for iterating through ``list_hot_tablets`` requests. - - This class thinly wraps an initial - :class:`google.cloud.bigtable_admin_v2.types.ListHotTabletsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``hot_tablets`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListHotTablets`` requests and continue to iterate - through the ``hot_tablets`` field on the - corresponding responses. - - All the usual :class:`google.cloud.bigtable_admin_v2.types.ListHotTabletsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - - def __init__( - self, - method: Callable[ - ..., Awaitable[bigtable_instance_admin.ListHotTabletsResponse] - ], - request: bigtable_instance_admin.ListHotTabletsRequest, - response: bigtable_instance_admin.ListHotTabletsResponse, - *, - retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = () - ): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.bigtable_admin_v2.types.ListHotTabletsRequest): - The initial request object. - response (google.cloud.bigtable_admin_v2.types.ListHotTabletsResponse): - The initial response object. - retry (google.api_core.retry.AsyncRetry): Designation of what errors, - if any, should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. 
- """ - self._method = method - self._request = bigtable_instance_admin.ListHotTabletsRequest(request) - self._response = response - self._retry = retry - self._timeout = timeout - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages( - self, - ) -> AsyncIterator[bigtable_instance_admin.ListHotTabletsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method( - self._request, - retry=self._retry, - timeout=self._timeout, - metadata=self._metadata, - ) - yield self._response - - def __aiter__(self) -> AsyncIterator[instance.HotTablet]: - async def async_generator(): - async for page in self.pages: - for response in page.hot_tablets: - yield response - - return async_generator() - - def __repr__(self) -> str: - return "{0}<{1!r}>".format(self.__class__.__name__, self._response) - - -class ListLogicalViewsPager: - """A pager for iterating through ``list_logical_views`` requests. - - This class thinly wraps an initial - :class:`google.cloud.bigtable_admin_v2.types.ListLogicalViewsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``logical_views`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListLogicalViews`` requests and continue to iterate - through the ``logical_views`` field on the - corresponding responses. - - All the usual :class:`google.cloud.bigtable_admin_v2.types.ListLogicalViewsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - - def __init__( - self, - method: Callable[..., bigtable_instance_admin.ListLogicalViewsResponse], - request: bigtable_instance_admin.ListLogicalViewsRequest, - response: bigtable_instance_admin.ListLogicalViewsResponse, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = () - ): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.bigtable_admin_v2.types.ListLogicalViewsRequest): - The initial request object. - response (google.cloud.bigtable_admin_v2.types.ListLogicalViewsResponse): - The initial response object. - retry (google.api_core.retry.Retry): Designation of what errors, - if any, should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. 
- """ - self._method = method - self._request = bigtable_instance_admin.ListLogicalViewsRequest(request) - self._response = response - self._retry = retry - self._timeout = timeout - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[bigtable_instance_admin.ListLogicalViewsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method( - self._request, - retry=self._retry, - timeout=self._timeout, - metadata=self._metadata, - ) - yield self._response - - def __iter__(self) -> Iterator[instance.LogicalView]: - for page in self.pages: - yield from page.logical_views - - def __repr__(self) -> str: - return "{0}<{1!r}>".format(self.__class__.__name__, self._response) - - -class ListLogicalViewsAsyncPager: - """A pager for iterating through ``list_logical_views`` requests. - - This class thinly wraps an initial - :class:`google.cloud.bigtable_admin_v2.types.ListLogicalViewsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``logical_views`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListLogicalViews`` requests and continue to iterate - through the ``logical_views`` field on the - corresponding responses. - - All the usual :class:`google.cloud.bigtable_admin_v2.types.ListLogicalViewsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - - def __init__( - self, - method: Callable[ - ..., Awaitable[bigtable_instance_admin.ListLogicalViewsResponse] - ], - request: bigtable_instance_admin.ListLogicalViewsRequest, - response: bigtable_instance_admin.ListLogicalViewsResponse, - *, - retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = () - ): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.bigtable_admin_v2.types.ListLogicalViewsRequest): - The initial request object. - response (google.cloud.bigtable_admin_v2.types.ListLogicalViewsResponse): - The initial response object. - retry (google.api_core.retry.AsyncRetry): Designation of what errors, - if any, should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. 
- """ - self._method = method - self._request = bigtable_instance_admin.ListLogicalViewsRequest(request) - self._response = response - self._retry = retry - self._timeout = timeout - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages( - self, - ) -> AsyncIterator[bigtable_instance_admin.ListLogicalViewsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method( - self._request, - retry=self._retry, - timeout=self._timeout, - metadata=self._metadata, - ) - yield self._response - - def __aiter__(self) -> AsyncIterator[instance.LogicalView]: - async def async_generator(): - async for page in self.pages: - for response in page.logical_views: - yield response - - return async_generator() - - def __repr__(self) -> str: - return "{0}<{1!r}>".format(self.__class__.__name__, self._response) - - -class ListMaterializedViewsPager: - """A pager for iterating through ``list_materialized_views`` requests. - - This class thinly wraps an initial - :class:`google.cloud.bigtable_admin_v2.types.ListMaterializedViewsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``materialized_views`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListMaterializedViews`` requests and continue to iterate - through the ``materialized_views`` field on the - corresponding responses. - - All the usual :class:`google.cloud.bigtable_admin_v2.types.ListMaterializedViewsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - - def __init__( - self, - method: Callable[..., bigtable_instance_admin.ListMaterializedViewsResponse], - request: bigtable_instance_admin.ListMaterializedViewsRequest, - response: bigtable_instance_admin.ListMaterializedViewsResponse, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = () - ): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.bigtable_admin_v2.types.ListMaterializedViewsRequest): - The initial request object. - response (google.cloud.bigtable_admin_v2.types.ListMaterializedViewsResponse): - The initial response object. - retry (google.api_core.retry.Retry): Designation of what errors, - if any, should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. 
- """ - self._method = method - self._request = bigtable_instance_admin.ListMaterializedViewsRequest(request) - self._response = response - self._retry = retry - self._timeout = timeout - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[bigtable_instance_admin.ListMaterializedViewsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method( - self._request, - retry=self._retry, - timeout=self._timeout, - metadata=self._metadata, - ) - yield self._response - - def __iter__(self) -> Iterator[instance.MaterializedView]: - for page in self.pages: - yield from page.materialized_views - - def __repr__(self) -> str: - return "{0}<{1!r}>".format(self.__class__.__name__, self._response) - - -class ListMaterializedViewsAsyncPager: - """A pager for iterating through ``list_materialized_views`` requests. - - This class thinly wraps an initial - :class:`google.cloud.bigtable_admin_v2.types.ListMaterializedViewsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``materialized_views`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListMaterializedViews`` requests and continue to iterate - through the ``materialized_views`` field on the - corresponding responses. - - All the usual :class:`google.cloud.bigtable_admin_v2.types.ListMaterializedViewsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - - def __init__( - self, - method: Callable[ - ..., Awaitable[bigtable_instance_admin.ListMaterializedViewsResponse] - ], - request: bigtable_instance_admin.ListMaterializedViewsRequest, - response: bigtable_instance_admin.ListMaterializedViewsResponse, - *, - retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = () - ): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.bigtable_admin_v2.types.ListMaterializedViewsRequest): - The initial request object. - response (google.cloud.bigtable_admin_v2.types.ListMaterializedViewsResponse): - The initial response object. - retry (google.api_core.retry.AsyncRetry): Designation of what errors, - if any, should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. 
- """ - self._method = method - self._request = bigtable_instance_admin.ListMaterializedViewsRequest(request) - self._response = response - self._retry = retry - self._timeout = timeout - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages( - self, - ) -> AsyncIterator[bigtable_instance_admin.ListMaterializedViewsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method( - self._request, - retry=self._retry, - timeout=self._timeout, - metadata=self._metadata, - ) - yield self._response - - def __aiter__(self) -> AsyncIterator[instance.MaterializedView]: - async def async_generator(): - async for page in self.pages: - for response in page.materialized_views: - yield response - - return async_generator() - - def __repr__(self) -> str: - return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/README.rst b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/README.rst deleted file mode 100644 index 9a01ee7c3..000000000 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/README.rst +++ /dev/null @@ -1,9 +0,0 @@ - -transport inheritance structure -_______________________________ - -`BigtableInstanceAdminTransport` is the ABC for all transports. -- public child `BigtableInstanceAdminGrpcTransport` for sync gRPC transport (defined in `grpc.py`). -- public child `BigtableInstanceAdminGrpcAsyncIOTransport` for async gRPC transport (defined in `grpc_asyncio.py`). -- private child `_BaseBigtableInstanceAdminRestTransport` for base REST transport with inner classes `_BaseMETHOD` (defined in `rest_base.py`). -- public child `BigtableInstanceAdminRestTransport` for sync REST transport with inner classes `METHOD` derived from the parent's corresponding `_BaseMETHOD` classes (defined in `rest.py`). diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py deleted file mode 100644 index 021458f35..000000000 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py +++ /dev/null @@ -1,40 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import BigtableInstanceAdminTransport -from .grpc import BigtableInstanceAdminGrpcTransport -from .grpc_asyncio import BigtableInstanceAdminGrpcAsyncIOTransport -from .rest import BigtableInstanceAdminRestTransport -from .rest import BigtableInstanceAdminRestInterceptor - - -# Compile a registry of transports. 
-_transport_registry = ( - OrderedDict() -) # type: Dict[str, Type[BigtableInstanceAdminTransport]] -_transport_registry["grpc"] = BigtableInstanceAdminGrpcTransport -_transport_registry["grpc_asyncio"] = BigtableInstanceAdminGrpcAsyncIOTransport -_transport_registry["rest"] = BigtableInstanceAdminRestTransport - -__all__ = ( - "BigtableInstanceAdminTransport", - "BigtableInstanceAdminGrpcTransport", - "BigtableInstanceAdminGrpcAsyncIOTransport", - "BigtableInstanceAdminRestTransport", - "BigtableInstanceAdminRestInterceptor", -) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py deleted file mode 100644 index f5ceeeb68..000000000 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py +++ /dev/null @@ -1,755 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union - -from google.cloud.bigtable_admin_v2 import gapic_version as package_version - -import google.auth # type: ignore -import google.api_core -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore -import google.protobuf - -from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin -from google.cloud.bigtable_admin_v2.types import instance -from google.iam.v1 import iam_policy_pb2 # type: ignore -from google.iam.v1 import policy_pb2 # type: ignore -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore - -DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=package_version.__version__ -) - -if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER - DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ - - -class BigtableInstanceAdminTransport(abc.ABC): - """Abstract transport class for BigtableInstanceAdmin.""" - - AUTH_SCOPES = ( - "https://www.googleapis.com/auth/bigtable.admin", - "https://www.googleapis.com/auth/bigtable.admin.cluster", - "https://www.googleapis.com/auth/bigtable.admin.instance", - "https://www.googleapis.com/auth/cloud-bigtable.admin", - "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - ) - - DEFAULT_HOST: str = "bigtableadmin.googleapis.com" - - def __init__( - self, - *, - host: str = DEFAULT_HOST, - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: 
gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - api_audience: Optional[str] = None, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to (default: 'bigtableadmin.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - - # Save the scopes. - self._scopes = scopes - if not hasattr(self, "_ignore_credentials"): - self._ignore_credentials: bool = False - - # If no credentials are provided, then determine the appropriate - # defaults. - if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs( - "'credentials_file' and 'credentials' are mutually exclusive" - ) - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id - ) - elif credentials is None and not self._ignore_credentials: - credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id - ) - # Don't apply audience if the credentials file passed from user. - if hasattr(credentials, "with_gdch_audience"): - credentials = credentials.with_gdch_audience( - api_audience if api_audience else host - ) - - # If the credentials are service account credentials, then always try to use self signed JWT. - if ( - always_use_jwt_access - and isinstance(credentials, service_account.Credentials) - and hasattr(service_account.Credentials, "with_always_use_jwt_access") - ): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ":" not in host: - host += ":443" - self._host = host - - @property - def host(self): - return self._host - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. 
- self._wrapped_methods = { - self.create_instance: gapic_v1.method.wrap_method( - self.create_instance, - default_timeout=300.0, - client_info=client_info, - ), - self.get_instance: gapic_v1.method.wrap_method( - self.get_instance, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.list_instances: gapic_v1.method.wrap_method( - self.list_instances, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.update_instance: gapic_v1.method.wrap_method( - self.update_instance, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.partial_update_instance: gapic_v1.method.wrap_method( - self.partial_update_instance, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.delete_instance: gapic_v1.method.wrap_method( - self.delete_instance, - default_timeout=60.0, - client_info=client_info, - ), - self.create_cluster: gapic_v1.method.wrap_method( - self.create_cluster, - default_timeout=60.0, - client_info=client_info, - ), - self.get_cluster: gapic_v1.method.wrap_method( - self.get_cluster, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.list_clusters: gapic_v1.method.wrap_method( - self.list_clusters, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.update_cluster: gapic_v1.method.wrap_method( - self.update_cluster, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.partial_update_cluster: gapic_v1.method.wrap_method( - self.partial_update_cluster, - default_timeout=None, - client_info=client_info, - ), - self.delete_cluster: gapic_v1.method.wrap_method( - self.delete_cluster, - default_timeout=60.0, - client_info=client_info, - ), - self.create_app_profile: gapic_v1.method.wrap_method( - self.create_app_profile, - default_timeout=60.0, - client_info=client_info, - ), - self.get_app_profile: gapic_v1.method.wrap_method( - self.get_app_profile, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - 
core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.list_app_profiles: gapic_v1.method.wrap_method( - self.list_app_profiles, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.update_app_profile: gapic_v1.method.wrap_method( - self.update_app_profile, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.delete_app_profile: gapic_v1.method.wrap_method( - self.delete_app_profile, - default_timeout=60.0, - client_info=client_info, - ), - self.get_iam_policy: gapic_v1.method.wrap_method( - self.get_iam_policy, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.set_iam_policy: gapic_v1.method.wrap_method( - self.set_iam_policy, - default_timeout=60.0, - client_info=client_info, - ), - self.test_iam_permissions: gapic_v1.method.wrap_method( - self.test_iam_permissions, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.list_hot_tablets: gapic_v1.method.wrap_method( - self.list_hot_tablets, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.create_logical_view: gapic_v1.method.wrap_method( - self.create_logical_view, - default_timeout=None, - client_info=client_info, - ), - self.get_logical_view: gapic_v1.method.wrap_method( - self.get_logical_view, - default_timeout=None, - client_info=client_info, - ), - self.list_logical_views: gapic_v1.method.wrap_method( - self.list_logical_views, - default_timeout=None, - client_info=client_info, - ), - self.update_logical_view: gapic_v1.method.wrap_method( - self.update_logical_view, - default_timeout=None, - client_info=client_info, - ), - self.delete_logical_view: gapic_v1.method.wrap_method( - self.delete_logical_view, - default_timeout=None, - client_info=client_info, - ), - self.create_materialized_view: gapic_v1.method.wrap_method( - self.create_materialized_view, - default_timeout=None, - client_info=client_info, - ), - self.get_materialized_view: gapic_v1.method.wrap_method( - self.get_materialized_view, - default_timeout=None, - client_info=client_info, - ), - self.list_materialized_views: gapic_v1.method.wrap_method( - self.list_materialized_views, - default_timeout=None, - client_info=client_info, - ), - self.update_materialized_view: gapic_v1.method.wrap_method( - self.update_materialized_view, - default_timeout=None, - client_info=client_info, - ), - self.delete_materialized_view: gapic_v1.method.wrap_method( - self.delete_materialized_view, - 
default_timeout=None, - client_info=client_info, - ), - } - - def close(self): - """Closes resources associated with the transport. - - .. warning:: - Only call this method if the transport is NOT shared - with other clients - this may cause errors in other clients! - """ - raise NotImplementedError() - - @property - def operations_client(self): - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def create_instance( - self, - ) -> Callable[ - [bigtable_instance_admin.CreateInstanceRequest], - Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], - ]: - raise NotImplementedError() - - @property - def get_instance( - self, - ) -> Callable[ - [bigtable_instance_admin.GetInstanceRequest], - Union[instance.Instance, Awaitable[instance.Instance]], - ]: - raise NotImplementedError() - - @property - def list_instances( - self, - ) -> Callable[ - [bigtable_instance_admin.ListInstancesRequest], - Union[ - bigtable_instance_admin.ListInstancesResponse, - Awaitable[bigtable_instance_admin.ListInstancesResponse], - ], - ]: - raise NotImplementedError() - - @property - def update_instance( - self, - ) -> Callable[ - [instance.Instance], Union[instance.Instance, Awaitable[instance.Instance]] - ]: - raise NotImplementedError() - - @property - def partial_update_instance( - self, - ) -> Callable[ - [bigtable_instance_admin.PartialUpdateInstanceRequest], - Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], - ]: - raise NotImplementedError() - - @property - def delete_instance( - self, - ) -> Callable[ - [bigtable_instance_admin.DeleteInstanceRequest], - Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]], - ]: - raise NotImplementedError() - - @property - def create_cluster( - self, - ) -> Callable[ - [bigtable_instance_admin.CreateClusterRequest], - Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], - ]: - raise NotImplementedError() - - @property - def get_cluster( - self, - ) -> Callable[ - [bigtable_instance_admin.GetClusterRequest], - Union[instance.Cluster, Awaitable[instance.Cluster]], - ]: - raise NotImplementedError() - - @property - def list_clusters( - self, - ) -> Callable[ - [bigtable_instance_admin.ListClustersRequest], - Union[ - bigtable_instance_admin.ListClustersResponse, - Awaitable[bigtable_instance_admin.ListClustersResponse], - ], - ]: - raise NotImplementedError() - - @property - def update_cluster( - self, - ) -> Callable[ - [instance.Cluster], - Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], - ]: - raise NotImplementedError() - - @property - def partial_update_cluster( - self, - ) -> Callable[ - [bigtable_instance_admin.PartialUpdateClusterRequest], - Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], - ]: - raise NotImplementedError() - - @property - def delete_cluster( - self, - ) -> Callable[ - [bigtable_instance_admin.DeleteClusterRequest], - Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]], - ]: - raise NotImplementedError() - - @property - def create_app_profile( - self, - ) -> Callable[ - [bigtable_instance_admin.CreateAppProfileRequest], - Union[instance.AppProfile, Awaitable[instance.AppProfile]], - ]: - raise NotImplementedError() - - @property - def get_app_profile( - self, - ) -> Callable[ - [bigtable_instance_admin.GetAppProfileRequest], - Union[instance.AppProfile, Awaitable[instance.AppProfile]], - ]: - raise NotImplementedError() - - @property - def list_app_profiles( - self, - ) -> Callable[ - 
[bigtable_instance_admin.ListAppProfilesRequest], - Union[ - bigtable_instance_admin.ListAppProfilesResponse, - Awaitable[bigtable_instance_admin.ListAppProfilesResponse], - ], - ]: - raise NotImplementedError() - - @property - def update_app_profile( - self, - ) -> Callable[ - [bigtable_instance_admin.UpdateAppProfileRequest], - Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], - ]: - raise NotImplementedError() - - @property - def delete_app_profile( - self, - ) -> Callable[ - [bigtable_instance_admin.DeleteAppProfileRequest], - Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]], - ]: - raise NotImplementedError() - - @property - def get_iam_policy( - self, - ) -> Callable[ - [iam_policy_pb2.GetIamPolicyRequest], - Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], - ]: - raise NotImplementedError() - - @property - def set_iam_policy( - self, - ) -> Callable[ - [iam_policy_pb2.SetIamPolicyRequest], - Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], - ]: - raise NotImplementedError() - - @property - def test_iam_permissions( - self, - ) -> Callable[ - [iam_policy_pb2.TestIamPermissionsRequest], - Union[ - iam_policy_pb2.TestIamPermissionsResponse, - Awaitable[iam_policy_pb2.TestIamPermissionsResponse], - ], - ]: - raise NotImplementedError() - - @property - def list_hot_tablets( - self, - ) -> Callable[ - [bigtable_instance_admin.ListHotTabletsRequest], - Union[ - bigtable_instance_admin.ListHotTabletsResponse, - Awaitable[bigtable_instance_admin.ListHotTabletsResponse], - ], - ]: - raise NotImplementedError() - - @property - def create_logical_view( - self, - ) -> Callable[ - [bigtable_instance_admin.CreateLogicalViewRequest], - Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], - ]: - raise NotImplementedError() - - @property - def get_logical_view( - self, - ) -> Callable[ - [bigtable_instance_admin.GetLogicalViewRequest], - Union[instance.LogicalView, Awaitable[instance.LogicalView]], - ]: - raise NotImplementedError() - - @property - def list_logical_views( - self, - ) -> Callable[ - [bigtable_instance_admin.ListLogicalViewsRequest], - Union[ - bigtable_instance_admin.ListLogicalViewsResponse, - Awaitable[bigtable_instance_admin.ListLogicalViewsResponse], - ], - ]: - raise NotImplementedError() - - @property - def update_logical_view( - self, - ) -> Callable[ - [bigtable_instance_admin.UpdateLogicalViewRequest], - Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], - ]: - raise NotImplementedError() - - @property - def delete_logical_view( - self, - ) -> Callable[ - [bigtable_instance_admin.DeleteLogicalViewRequest], - Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]], - ]: - raise NotImplementedError() - - @property - def create_materialized_view( - self, - ) -> Callable[ - [bigtable_instance_admin.CreateMaterializedViewRequest], - Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], - ]: - raise NotImplementedError() - - @property - def get_materialized_view( - self, - ) -> Callable[ - [bigtable_instance_admin.GetMaterializedViewRequest], - Union[instance.MaterializedView, Awaitable[instance.MaterializedView]], - ]: - raise NotImplementedError() - - @property - def list_materialized_views( - self, - ) -> Callable[ - [bigtable_instance_admin.ListMaterializedViewsRequest], - Union[ - bigtable_instance_admin.ListMaterializedViewsResponse, - Awaitable[bigtable_instance_admin.ListMaterializedViewsResponse], - ], - ]: - raise NotImplementedError() - - @property - def update_materialized_view( - self, - 
) -> Callable[ - [bigtable_instance_admin.UpdateMaterializedViewRequest], - Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], - ]: - raise NotImplementedError() - - @property - def delete_materialized_view( - self, - ) -> Callable[ - [bigtable_instance_admin.DeleteMaterializedViewRequest], - Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]], - ]: - raise NotImplementedError() - - @property - def kind(self) -> str: - raise NotImplementedError() - - -__all__ = ("BigtableInstanceAdminTransport",) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py deleted file mode 100644 index a294144ef..000000000 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py +++ /dev/null @@ -1,1248 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import json -import logging as std_logging -import pickle -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers -from google.api_core import operations_v1 -from google.api_core import gapic_v1 -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.protobuf.json_format import MessageToJson -import google.protobuf.message - -import grpc # type: ignore -import proto # type: ignore - -from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin -from google.cloud.bigtable_admin_v2.types import instance -from google.iam.v1 import iam_policy_pb2 # type: ignore -from google.iam.v1 import policy_pb2 # type: ignore -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from .base import BigtableInstanceAdminTransport, DEFAULT_CLIENT_INFO - -try: - from google.api_core import client_logging # type: ignore - - CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER -except ImportError: # pragma: NO COVER - CLIENT_LOGGING_SUPPORTED = False - -_LOGGER = std_logging.getLogger(__name__) - - -class _LoggingClientInterceptor(grpc.UnaryUnaryClientInterceptor): # pragma: NO COVER - def intercept_unary_unary(self, continuation, client_call_details, request): - logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - std_logging.DEBUG - ) - if logging_enabled: # pragma: NO COVER - request_metadata = client_call_details.metadata - if isinstance(request, proto.Message): - request_payload = type(request).to_json(request) - elif isinstance(request, google.protobuf.message.Message): - request_payload = MessageToJson(request) - else: - request_payload = f"{type(request).__name__}: {pickle.dumps(request)}" - - request_metadata = { - key: value.decode("utf-8") if isinstance(value, bytes) else value - for key, value in request_metadata - } - grpc_request = { - "payload": 
request_payload, - "requestMethod": "grpc", - "metadata": dict(request_metadata), - } - _LOGGER.debug( - f"Sending request for {client_call_details.method}", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": str(client_call_details.method), - "request": grpc_request, - "metadata": grpc_request["metadata"], - }, - ) - response = continuation(client_call_details, request) - if logging_enabled: # pragma: NO COVER - response_metadata = response.trailing_metadata() - # Convert gRPC metadata `<class 'grpc.aio._metadata.Metadata'>` to list of tuples - metadata = ( - dict([(k, str(v)) for k, v in response_metadata]) - if response_metadata - else None - ) - result = response.result() - if isinstance(result, proto.Message): - response_payload = type(result).to_json(result) - elif isinstance(result, google.protobuf.message.Message): - response_payload = MessageToJson(result) - else: - response_payload = f"{type(result).__name__}: {pickle.dumps(result)}" - grpc_response = { - "payload": response_payload, - "metadata": metadata, - "status": "OK", - } - _LOGGER.debug( - f"Received response for {client_call_details.method}.", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": client_call_details.method, - "response": grpc_response, - "metadata": grpc_response["metadata"], - }, - ) - return response - - -class BigtableInstanceAdminGrpcTransport(BigtableInstanceAdminTransport): - """gRPC backend transport for BigtableInstanceAdmin. - - Service for creating, configuring, and deleting Cloud - Bigtable Instances and Clusters. Provides access to the Instance - and Cluster schemas only, not the tables' metadata or data - stored in those tables. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _stubs: Dict[str, Callable] - - def __init__( - self, - *, - host: str = "bigtableadmin.googleapis.com", - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]] = None, - api_mtls_endpoint: Optional[str] = None, - client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, - ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, - client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - api_audience: Optional[str] = None, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to (default: 'bigtableadmin.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if a ``channel`` instance is provided. - scopes (Optional(Sequence[str])): A list of scopes.
This argument is - ignored if a ``channel`` instance is provided. - channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]): - A ``Channel`` instance through which to make calls, or a Callable - that constructs and returns one. If set to None, ``self.create_channel`` - is used to create the channel. If a Callable is given, it will be called - with the same arguments as used in ``self.create_channel``. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if a ``channel`` instance is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if isinstance(channel, grpc.Channel): - # Ignore credentials if a channel was passed. - credentials = None - self._ignore_credentials = True - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. 
- if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - api_audience=api_audience, - ) - - if not self._grpc_channel: - # initialize with the provided callable or the default channel - channel_init = channel or type(self).create_channel - self._grpc_channel = channel_init( - self._host, - # use the credentials which are saved - credentials=self._credentials, - # Set ``credentials_file`` to ``None`` here as - # the credentials that we saved earlier should be used. - credentials_file=None, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - self._interceptor = _LoggingClientInterceptor() - self._logged_channel = grpc.intercept_channel( - self._grpc_channel, self._interceptor - ) - - # Wrap messages. This must be done after self._logged_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel( - cls, - host: str = "bigtableadmin.googleapis.com", - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs, - ) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. 
- """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs, - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service.""" - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Quick check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self._logged_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_instance( - self, - ) -> Callable[ - [bigtable_instance_admin.CreateInstanceRequest], operations_pb2.Operation - ]: - r"""Return a callable for the create instance method over gRPC. - - Create an instance within a project. - - Note that exactly one of Cluster.serve_nodes and - Cluster.cluster_config.cluster_autoscaling_config can be set. If - serve_nodes is set to non-zero, then the cluster is manually - scaled. If cluster_config.cluster_autoscaling_config is - non-empty, then autoscaling is enabled. - - Returns: - Callable[[~.CreateInstanceRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "create_instance" not in self._stubs: - self._stubs["create_instance"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateInstance", - request_serializer=bigtable_instance_admin.CreateInstanceRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs["create_instance"] - - @property - def get_instance( - self, - ) -> Callable[[bigtable_instance_admin.GetInstanceRequest], instance.Instance]: - r"""Return a callable for the get instance method over gRPC. - - Gets information about an instance. - - Returns: - Callable[[~.GetInstanceRequest], - ~.Instance]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "get_instance" not in self._stubs: - self._stubs["get_instance"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetInstance", - request_serializer=bigtable_instance_admin.GetInstanceRequest.serialize, - response_deserializer=instance.Instance.deserialize, - ) - return self._stubs["get_instance"] - - @property - def list_instances( - self, - ) -> Callable[ - [bigtable_instance_admin.ListInstancesRequest], - bigtable_instance_admin.ListInstancesResponse, - ]: - r"""Return a callable for the list instances method over gRPC. - - Lists information about instances in a project. - - Returns: - Callable[[~.ListInstancesRequest], - ~.ListInstancesResponse]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "list_instances" not in self._stubs: - self._stubs["list_instances"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListInstances", - request_serializer=bigtable_instance_admin.ListInstancesRequest.serialize, - response_deserializer=bigtable_instance_admin.ListInstancesResponse.deserialize, - ) - return self._stubs["list_instances"] - - @property - def update_instance(self) -> Callable[[instance.Instance], instance.Instance]: - r"""Return a callable for the update instance method over gRPC. - - Updates an instance within a project. This method - updates only the display name and type for an Instance. - To update other Instance properties, such as labels, use - PartialUpdateInstance. - - Returns: - Callable[[~.Instance], - ~.Instance]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "update_instance" not in self._stubs: - self._stubs["update_instance"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateInstance", - request_serializer=instance.Instance.serialize, - response_deserializer=instance.Instance.deserialize, - ) - return self._stubs["update_instance"] - - @property - def partial_update_instance( - self, - ) -> Callable[ - [bigtable_instance_admin.PartialUpdateInstanceRequest], operations_pb2.Operation - ]: - r"""Return a callable for the partial update instance method over gRPC. - - Partially updates an instance within a project. This - method can modify all fields of an Instance and is the - preferred way to update an Instance. - - Returns: - Callable[[~.PartialUpdateInstanceRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "partial_update_instance" not in self._stubs: - self._stubs["partial_update_instance"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateInstance", - request_serializer=bigtable_instance_admin.PartialUpdateInstanceRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs["partial_update_instance"] - - @property - def delete_instance( - self, - ) -> Callable[[bigtable_instance_admin.DeleteInstanceRequest], empty_pb2.Empty]: - r"""Return a callable for the delete instance method over gRPC. - - Delete an instance from a project. - - Returns: - Callable[[~.DeleteInstanceRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if "delete_instance" not in self._stubs: - self._stubs["delete_instance"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteInstance", - request_serializer=bigtable_instance_admin.DeleteInstanceRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs["delete_instance"] - - @property - def create_cluster( - self, - ) -> Callable[ - [bigtable_instance_admin.CreateClusterRequest], operations_pb2.Operation - ]: - r"""Return a callable for the create cluster method over gRPC. - - Creates a cluster within an instance. - - Note that exactly one of Cluster.serve_nodes and - Cluster.cluster_config.cluster_autoscaling_config can be set. If - serve_nodes is set to non-zero, then the cluster is manually - scaled. If cluster_config.cluster_autoscaling_config is - non-empty, then autoscaling is enabled. - - Returns: - Callable[[~.CreateClusterRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "create_cluster" not in self._stubs: - self._stubs["create_cluster"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateCluster", - request_serializer=bigtable_instance_admin.CreateClusterRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs["create_cluster"] - - @property - def get_cluster( - self, - ) -> Callable[[bigtable_instance_admin.GetClusterRequest], instance.Cluster]: - r"""Return a callable for the get cluster method over gRPC. - - Gets information about a cluster. - - Returns: - Callable[[~.GetClusterRequest], - ~.Cluster]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "get_cluster" not in self._stubs: - self._stubs["get_cluster"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetCluster", - request_serializer=bigtable_instance_admin.GetClusterRequest.serialize, - response_deserializer=instance.Cluster.deserialize, - ) - return self._stubs["get_cluster"] - - @property - def list_clusters( - self, - ) -> Callable[ - [bigtable_instance_admin.ListClustersRequest], - bigtable_instance_admin.ListClustersResponse, - ]: - r"""Return a callable for the list clusters method over gRPC. - - Lists information about clusters in an instance. - - Returns: - Callable[[~.ListClustersRequest], - ~.ListClustersResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if "list_clusters" not in self._stubs: - self._stubs["list_clusters"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListClusters", - request_serializer=bigtable_instance_admin.ListClustersRequest.serialize, - response_deserializer=bigtable_instance_admin.ListClustersResponse.deserialize, - ) - return self._stubs["list_clusters"] - - @property - def update_cluster(self) -> Callable[[instance.Cluster], operations_pb2.Operation]: - r"""Return a callable for the update cluster method over gRPC. - - Updates a cluster within an instance. - - Note that UpdateCluster does not support updating - cluster_config.cluster_autoscaling_config. In order to update - it, you must use PartialUpdateCluster. - - Returns: - Callable[[~.Cluster], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "update_cluster" not in self._stubs: - self._stubs["update_cluster"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateCluster", - request_serializer=instance.Cluster.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs["update_cluster"] - - @property - def partial_update_cluster( - self, - ) -> Callable[ - [bigtable_instance_admin.PartialUpdateClusterRequest], operations_pb2.Operation - ]: - r"""Return a callable for the partial update cluster method over gRPC. - - Partially updates a cluster within a project. This method is the - preferred way to update a Cluster. - - To enable and update autoscaling, set - cluster_config.cluster_autoscaling_config. When autoscaling is - enabled, serve_nodes is treated as an OUTPUT_ONLY field, meaning - that updates to it are ignored. Note that an update cannot - simultaneously set serve_nodes to non-zero and - cluster_config.cluster_autoscaling_config to non-empty, and also - specify both in the update_mask. - - To disable autoscaling, clear - cluster_config.cluster_autoscaling_config, and explicitly set a - serve_node count via the update_mask. - - Returns: - Callable[[~.PartialUpdateClusterRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "partial_update_cluster" not in self._stubs: - self._stubs["partial_update_cluster"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateCluster", - request_serializer=bigtable_instance_admin.PartialUpdateClusterRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs["partial_update_cluster"] - - @property - def delete_cluster( - self, - ) -> Callable[[bigtable_instance_admin.DeleteClusterRequest], empty_pb2.Empty]: - r"""Return a callable for the delete cluster method over gRPC. - - Deletes a cluster from an instance. - - Returns: - Callable[[~.DeleteClusterRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "delete_cluster" not in self._stubs: - self._stubs["delete_cluster"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteCluster", - request_serializer=bigtable_instance_admin.DeleteClusterRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs["delete_cluster"] - - @property - def create_app_profile( - self, - ) -> Callable[ - [bigtable_instance_admin.CreateAppProfileRequest], instance.AppProfile - ]: - r"""Return a callable for the create app profile method over gRPC. - - Creates an app profile within an instance. - - Returns: - Callable[[~.CreateAppProfileRequest], - ~.AppProfile]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "create_app_profile" not in self._stubs: - self._stubs["create_app_profile"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateAppProfile", - request_serializer=bigtable_instance_admin.CreateAppProfileRequest.serialize, - response_deserializer=instance.AppProfile.deserialize, - ) - return self._stubs["create_app_profile"] - - @property - def get_app_profile( - self, - ) -> Callable[[bigtable_instance_admin.GetAppProfileRequest], instance.AppProfile]: - r"""Return a callable for the get app profile method over gRPC. - - Gets information about an app profile. - - Returns: - Callable[[~.GetAppProfileRequest], - ~.AppProfile]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "get_app_profile" not in self._stubs: - self._stubs["get_app_profile"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetAppProfile", - request_serializer=bigtable_instance_admin.GetAppProfileRequest.serialize, - response_deserializer=instance.AppProfile.deserialize, - ) - return self._stubs["get_app_profile"] - - @property - def list_app_profiles( - self, - ) -> Callable[ - [bigtable_instance_admin.ListAppProfilesRequest], - bigtable_instance_admin.ListAppProfilesResponse, - ]: - r"""Return a callable for the list app profiles method over gRPC. - - Lists information about app profiles in an instance. - - Returns: - Callable[[~.ListAppProfilesRequest], - ~.ListAppProfilesResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if "list_app_profiles" not in self._stubs: - self._stubs["list_app_profiles"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListAppProfiles", - request_serializer=bigtable_instance_admin.ListAppProfilesRequest.serialize, - response_deserializer=bigtable_instance_admin.ListAppProfilesResponse.deserialize, - ) - return self._stubs["list_app_profiles"] - - @property - def update_app_profile( - self, - ) -> Callable[ - [bigtable_instance_admin.UpdateAppProfileRequest], operations_pb2.Operation - ]: - r"""Return a callable for the update app profile method over gRPC. - - Updates an app profile within an instance. - - Returns: - Callable[[~.UpdateAppProfileRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "update_app_profile" not in self._stubs: - self._stubs["update_app_profile"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateAppProfile", - request_serializer=bigtable_instance_admin.UpdateAppProfileRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs["update_app_profile"] - - @property - def delete_app_profile( - self, - ) -> Callable[[bigtable_instance_admin.DeleteAppProfileRequest], empty_pb2.Empty]: - r"""Return a callable for the delete app profile method over gRPC. - - Deletes an app profile from an instance. - - Returns: - Callable[[~.DeleteAppProfileRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "delete_app_profile" not in self._stubs: - self._stubs["delete_app_profile"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteAppProfile", - request_serializer=bigtable_instance_admin.DeleteAppProfileRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs["delete_app_profile"] - - @property - def get_iam_policy( - self, - ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: - r"""Return a callable for the get iam policy method over gRPC. - - Gets the access control policy for an instance - resource. Returns an empty policy if an instance exists - but does not have a policy set. - - Returns: - Callable[[~.GetIamPolicyRequest], - ~.Policy]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "get_iam_policy" not in self._stubs: - self._stubs["get_iam_policy"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetIamPolicy", - request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, - response_deserializer=policy_pb2.Policy.FromString, - ) - return self._stubs["get_iam_policy"] - - @property - def set_iam_policy( - self, - ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: - r"""Return a callable for the set iam policy method over gRPC. 
- - Sets the access control policy on an instance - resource. Replaces any existing policy. - - Returns: - Callable[[~.SetIamPolicyRequest], - ~.Policy]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "set_iam_policy" not in self._stubs: - self._stubs["set_iam_policy"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/SetIamPolicy", - request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, - response_deserializer=policy_pb2.Policy.FromString, - ) - return self._stubs["set_iam_policy"] - - @property - def test_iam_permissions( - self, - ) -> Callable[ - [iam_policy_pb2.TestIamPermissionsRequest], - iam_policy_pb2.TestIamPermissionsResponse, - ]: - r"""Return a callable for the test iam permissions method over gRPC. - - Returns permissions that the caller has on the - specified instance resource. - - Returns: - Callable[[~.TestIamPermissionsRequest], - ~.TestIamPermissionsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "test_iam_permissions" not in self._stubs: - self._stubs["test_iam_permissions"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/TestIamPermissions", - request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, - response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, - ) - return self._stubs["test_iam_permissions"] - - @property - def list_hot_tablets( - self, - ) -> Callable[ - [bigtable_instance_admin.ListHotTabletsRequest], - bigtable_instance_admin.ListHotTabletsResponse, - ]: - r"""Return a callable for the list hot tablets method over gRPC. - - Lists hot tablets in a cluster, within the time range - provided. Hot tablets are ordered based on CPU usage. - - Returns: - Callable[[~.ListHotTabletsRequest], - ~.ListHotTabletsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "list_hot_tablets" not in self._stubs: - self._stubs["list_hot_tablets"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListHotTablets", - request_serializer=bigtable_instance_admin.ListHotTabletsRequest.serialize, - response_deserializer=bigtable_instance_admin.ListHotTabletsResponse.deserialize, - ) - return self._stubs["list_hot_tablets"] - - @property - def create_logical_view( - self, - ) -> Callable[ - [bigtable_instance_admin.CreateLogicalViewRequest], operations_pb2.Operation - ]: - r"""Return a callable for the create logical view method over gRPC. - - Creates a logical view within an instance. - - Returns: - Callable[[~.CreateLogicalViewRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "create_logical_view" not in self._stubs: - self._stubs["create_logical_view"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateLogicalView", - request_serializer=bigtable_instance_admin.CreateLogicalViewRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs["create_logical_view"] - - @property - def get_logical_view( - self, - ) -> Callable[ - [bigtable_instance_admin.GetLogicalViewRequest], instance.LogicalView - ]: - r"""Return a callable for the get logical view method over gRPC. - - Gets information about a logical view. - - Returns: - Callable[[~.GetLogicalViewRequest], - ~.LogicalView]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "get_logical_view" not in self._stubs: - self._stubs["get_logical_view"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetLogicalView", - request_serializer=bigtable_instance_admin.GetLogicalViewRequest.serialize, - response_deserializer=instance.LogicalView.deserialize, - ) - return self._stubs["get_logical_view"] - - @property - def list_logical_views( - self, - ) -> Callable[ - [bigtable_instance_admin.ListLogicalViewsRequest], - bigtable_instance_admin.ListLogicalViewsResponse, - ]: - r"""Return a callable for the list logical views method over gRPC. - - Lists information about logical views in an instance. - - Returns: - Callable[[~.ListLogicalViewsRequest], - ~.ListLogicalViewsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "list_logical_views" not in self._stubs: - self._stubs["list_logical_views"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListLogicalViews", - request_serializer=bigtable_instance_admin.ListLogicalViewsRequest.serialize, - response_deserializer=bigtable_instance_admin.ListLogicalViewsResponse.deserialize, - ) - return self._stubs["list_logical_views"] - - @property - def update_logical_view( - self, - ) -> Callable[ - [bigtable_instance_admin.UpdateLogicalViewRequest], operations_pb2.Operation - ]: - r"""Return a callable for the update logical view method over gRPC. - - Updates a logical view within an instance. - - Returns: - Callable[[~.UpdateLogicalViewRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if "update_logical_view" not in self._stubs: - self._stubs["update_logical_view"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateLogicalView", - request_serializer=bigtable_instance_admin.UpdateLogicalViewRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs["update_logical_view"] - - @property - def delete_logical_view( - self, - ) -> Callable[[bigtable_instance_admin.DeleteLogicalViewRequest], empty_pb2.Empty]: - r"""Return a callable for the delete logical view method over gRPC. - - Deletes a logical view from an instance. - - Returns: - Callable[[~.DeleteLogicalViewRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "delete_logical_view" not in self._stubs: - self._stubs["delete_logical_view"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteLogicalView", - request_serializer=bigtable_instance_admin.DeleteLogicalViewRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs["delete_logical_view"] - - @property - def create_materialized_view( - self, - ) -> Callable[ - [bigtable_instance_admin.CreateMaterializedViewRequest], - operations_pb2.Operation, - ]: - r"""Return a callable for the create materialized view method over gRPC. - - Creates a materialized view within an instance. - - Returns: - Callable[[~.CreateMaterializedViewRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "create_materialized_view" not in self._stubs: - self._stubs["create_materialized_view"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateMaterializedView", - request_serializer=bigtable_instance_admin.CreateMaterializedViewRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs["create_materialized_view"] - - @property - def get_materialized_view( - self, - ) -> Callable[ - [bigtable_instance_admin.GetMaterializedViewRequest], instance.MaterializedView - ]: - r"""Return a callable for the get materialized view method over gRPC. - - Gets information about a materialized view. - - Returns: - Callable[[~.GetMaterializedViewRequest], - ~.MaterializedView]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if "get_materialized_view" not in self._stubs: - self._stubs["get_materialized_view"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetMaterializedView", - request_serializer=bigtable_instance_admin.GetMaterializedViewRequest.serialize, - response_deserializer=instance.MaterializedView.deserialize, - ) - return self._stubs["get_materialized_view"] - - @property - def list_materialized_views( - self, - ) -> Callable[ - [bigtable_instance_admin.ListMaterializedViewsRequest], - bigtable_instance_admin.ListMaterializedViewsResponse, - ]: - r"""Return a callable for the list materialized views method over gRPC. - - Lists information about materialized views in an - instance. - - Returns: - Callable[[~.ListMaterializedViewsRequest], - ~.ListMaterializedViewsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "list_materialized_views" not in self._stubs: - self._stubs["list_materialized_views"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListMaterializedViews", - request_serializer=bigtable_instance_admin.ListMaterializedViewsRequest.serialize, - response_deserializer=bigtable_instance_admin.ListMaterializedViewsResponse.deserialize, - ) - return self._stubs["list_materialized_views"] - - @property - def update_materialized_view( - self, - ) -> Callable[ - [bigtable_instance_admin.UpdateMaterializedViewRequest], - operations_pb2.Operation, - ]: - r"""Return a callable for the update materialized view method over gRPC. - - Updates a materialized view within an instance. - - Returns: - Callable[[~.UpdateMaterializedViewRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "update_materialized_view" not in self._stubs: - self._stubs["update_materialized_view"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateMaterializedView", - request_serializer=bigtable_instance_admin.UpdateMaterializedViewRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs["update_materialized_view"] - - @property - def delete_materialized_view( - self, - ) -> Callable[ - [bigtable_instance_admin.DeleteMaterializedViewRequest], empty_pb2.Empty - ]: - r"""Return a callable for the delete materialized view method over gRPC. - - Deletes a materialized view from an instance. - - Returns: - Callable[[~.DeleteMaterializedViewRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if "delete_materialized_view" not in self._stubs: - self._stubs["delete_materialized_view"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteMaterializedView", - request_serializer=bigtable_instance_admin.DeleteMaterializedViewRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs["delete_materialized_view"] - - def close(self): - self._logged_channel.close() - - @property - def kind(self) -> str: - return "grpc" - - -__all__ = ("BigtableInstanceAdminGrpcTransport",) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py deleted file mode 100644 index aae0f44c4..000000000 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py +++ /dev/null @@ -1,1579 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import inspect -import json -import pickle -import logging as std_logging -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers_async -from google.api_core import exceptions as core_exceptions -from google.api_core import retry_async as retries -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.protobuf.json_format import MessageToJson -import google.protobuf.message - -import grpc # type: ignore -import proto # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin -from google.cloud.bigtable_admin_v2.types import instance -from google.iam.v1 import iam_policy_pb2 # type: ignore -from google.iam.v1 import policy_pb2 # type: ignore -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from .base import BigtableInstanceAdminTransport, DEFAULT_CLIENT_INFO -from .grpc import BigtableInstanceAdminGrpcTransport - -try: - from google.api_core import client_logging # type: ignore - - CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER -except ImportError: # pragma: NO COVER - CLIENT_LOGGING_SUPPORTED = False - -_LOGGER = std_logging.getLogger(__name__) - - -class _LoggingClientAIOInterceptor( - grpc.aio.UnaryUnaryClientInterceptor -): # pragma: NO COVER - async def intercept_unary_unary(self, continuation, client_call_details, request): - logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - std_logging.DEBUG - ) - if logging_enabled: # pragma: NO COVER - request_metadata = client_call_details.metadata - if isinstance(request, proto.Message): - request_payload = type(request).to_json(request) - elif isinstance(request, google.protobuf.message.Message): 
- request_payload = MessageToJson(request) - else: - request_payload = f"{type(request).__name__}: {pickle.dumps(request)}" - - request_metadata = { - key: value.decode("utf-8") if isinstance(value, bytes) else value - for key, value in request_metadata - } - grpc_request = { - "payload": request_payload, - "requestMethod": "grpc", - "metadata": dict(request_metadata), - } - _LOGGER.debug( - f"Sending request for {client_call_details.method}", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": str(client_call_details.method), - "request": grpc_request, - "metadata": grpc_request["metadata"], - }, - ) - response = await continuation(client_call_details, request) - if logging_enabled: # pragma: NO COVER - response_metadata = await response.trailing_metadata() - # Convert gRPC metadata to a dict of strings - metadata = ( - dict([(k, str(v)) for k, v in response_metadata]) - if response_metadata - else None - ) - result = await response - if isinstance(result, proto.Message): - response_payload = type(result).to_json(result) - elif isinstance(result, google.protobuf.message.Message): - response_payload = MessageToJson(result) - else: - response_payload = f"{type(result).__name__}: {pickle.dumps(result)}" - grpc_response = { - "payload": response_payload, - "metadata": metadata, - "status": "OK", - } - _LOGGER.debug( - f"Received response to rpc {client_call_details.method}.", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": str(client_call_details.method), - "response": grpc_response, - "metadata": grpc_response["metadata"], - }, - ) - return response - - - class BigtableInstanceAdminGrpcAsyncIOTransport(BigtableInstanceAdminTransport): - """gRPC AsyncIO backend transport for BigtableInstanceAdmin. - - Service for creating, configuring, and deleting Cloud - Bigtable Instances and Clusters. Provides access to the Instance - and Cluster schemas only, not the tables' metadata or data - stored in those tables. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel( - cls, - host: str = "bigtableadmin.googleapis.com", - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs, - ) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - scopes (Optional[Sequence[str]]): An optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. 
- kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. - """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs, - ) - - def __init__( - self, - *, - host: str = "bigtableadmin.googleapis.com", - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: Optional[Union[aio.Channel, Callable[..., aio.Channel]]] = None, - api_mtls_endpoint: Optional[str] = None, - client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, - ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, - client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - api_audience: Optional[str] = None, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to (default: 'bigtableadmin.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if a ``channel`` instance is provided. - scopes (Optional[Sequence[str]]): An optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[Union[aio.Channel, Callable[..., aio.Channel]]]): - A ``Channel`` instance through which to make calls, or a Callable - that constructs and returns one. If set to None, ``self.create_channel`` - is used to create the channel. If a Callable is given, it will be called - with the same arguments as used in ``self.create_channel``. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if a ``channel`` instance is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. 
- client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if isinstance(channel, aio.Channel): - # Ignore credentials if a channel was passed. - credentials = None - self._ignore_credentials = True - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - api_audience=api_audience, - ) - - if not self._grpc_channel: - # initialize with the provided callable or the default channel - channel_init = channel or type(self).create_channel - self._grpc_channel = channel_init( - self._host, - # use the credentials which are saved - credentials=self._credentials, - # Set ``credentials_file`` to ``None`` here as - # the credentials that we saved earlier should be used. - credentials_file=None, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - self._interceptor = _LoggingClientAIOInterceptor() - self._grpc_channel._unary_unary_interceptors.append(self._interceptor) - self._logged_channel = self._grpc_channel - self._wrap_with_kind = ( - "kind" in inspect.signature(gapic_v1.method_async.wrap_method).parameters - ) - # Wrap messages. This must be done after self._logged_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. 
- return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Quick check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self._logged_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_instance( - self, - ) -> Callable[ - [bigtable_instance_admin.CreateInstanceRequest], - Awaitable[operations_pb2.Operation], - ]: - r"""Return a callable for the create instance method over gRPC. - - Create an instance within a project. - - Note that exactly one of Cluster.serve_nodes and - Cluster.cluster_config.cluster_autoscaling_config can be set. If - serve_nodes is set to non-zero, then the cluster is manually - scaled. If cluster_config.cluster_autoscaling_config is - non-empty, then autoscaling is enabled. - - Returns: - Callable[[~.CreateInstanceRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "create_instance" not in self._stubs: - self._stubs["create_instance"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateInstance", - request_serializer=bigtable_instance_admin.CreateInstanceRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs["create_instance"] - - @property - def get_instance( - self, - ) -> Callable[ - [bigtable_instance_admin.GetInstanceRequest], Awaitable[instance.Instance] - ]: - r"""Return a callable for the get instance method over gRPC. - - Gets information about an instance. - - Returns: - Callable[[~.GetInstanceRequest], - Awaitable[~.Instance]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "get_instance" not in self._stubs: - self._stubs["get_instance"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetInstance", - request_serializer=bigtable_instance_admin.GetInstanceRequest.serialize, - response_deserializer=instance.Instance.deserialize, - ) - return self._stubs["get_instance"] - - @property - def list_instances( - self, - ) -> Callable[ - [bigtable_instance_admin.ListInstancesRequest], - Awaitable[bigtable_instance_admin.ListInstancesResponse], - ]: - r"""Return a callable for the list instances method over gRPC. - - Lists information about instances in a project. - - Returns: - Callable[[~.ListInstancesRequest], - Awaitable[~.ListInstancesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if "list_instances" not in self._stubs: - self._stubs["list_instances"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListInstances", - request_serializer=bigtable_instance_admin.ListInstancesRequest.serialize, - response_deserializer=bigtable_instance_admin.ListInstancesResponse.deserialize, - ) - return self._stubs["list_instances"] - - @property - def update_instance( - self, - ) -> Callable[[instance.Instance], Awaitable[instance.Instance]]: - r"""Return a callable for the update instance method over gRPC. - - Updates an instance within a project. This method - updates only the display name and type for an Instance. - To update other Instance properties, such as labels, use - PartialUpdateInstance. - - Returns: - Callable[[~.Instance], - Awaitable[~.Instance]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "update_instance" not in self._stubs: - self._stubs["update_instance"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateInstance", - request_serializer=instance.Instance.serialize, - response_deserializer=instance.Instance.deserialize, - ) - return self._stubs["update_instance"] - - @property - def partial_update_instance( - self, - ) -> Callable[ - [bigtable_instance_admin.PartialUpdateInstanceRequest], - Awaitable[operations_pb2.Operation], - ]: - r"""Return a callable for the partial update instance method over gRPC. - - Partially updates an instance within a project. This - method can modify all fields of an Instance and is the - preferred way to update an Instance. - - Returns: - Callable[[~.PartialUpdateInstanceRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "partial_update_instance" not in self._stubs: - self._stubs["partial_update_instance"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateInstance", - request_serializer=bigtable_instance_admin.PartialUpdateInstanceRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs["partial_update_instance"] - - @property - def delete_instance( - self, - ) -> Callable[ - [bigtable_instance_admin.DeleteInstanceRequest], Awaitable[empty_pb2.Empty] - ]: - r"""Return a callable for the delete instance method over gRPC. - - Delete an instance from a project. - - Returns: - Callable[[~.DeleteInstanceRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if "delete_instance" not in self._stubs: - self._stubs["delete_instance"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteInstance", - request_serializer=bigtable_instance_admin.DeleteInstanceRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs["delete_instance"] - - @property - def create_cluster( - self, - ) -> Callable[ - [bigtable_instance_admin.CreateClusterRequest], - Awaitable[operations_pb2.Operation], - ]: - r"""Return a callable for the create cluster method over gRPC. - - Creates a cluster within an instance. - - Note that exactly one of Cluster.serve_nodes and - Cluster.cluster_config.cluster_autoscaling_config can be set. If - serve_nodes is set to non-zero, then the cluster is manually - scaled. If cluster_config.cluster_autoscaling_config is - non-empty, then autoscaling is enabled. - - Returns: - Callable[[~.CreateClusterRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "create_cluster" not in self._stubs: - self._stubs["create_cluster"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateCluster", - request_serializer=bigtable_instance_admin.CreateClusterRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs["create_cluster"] - - @property - def get_cluster( - self, - ) -> Callable[ - [bigtable_instance_admin.GetClusterRequest], Awaitable[instance.Cluster] - ]: - r"""Return a callable for the get cluster method over gRPC. - - Gets information about a cluster. - - Returns: - Callable[[~.GetClusterRequest], - Awaitable[~.Cluster]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "get_cluster" not in self._stubs: - self._stubs["get_cluster"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetCluster", - request_serializer=bigtable_instance_admin.GetClusterRequest.serialize, - response_deserializer=instance.Cluster.deserialize, - ) - return self._stubs["get_cluster"] - - @property - def list_clusters( - self, - ) -> Callable[ - [bigtable_instance_admin.ListClustersRequest], - Awaitable[bigtable_instance_admin.ListClustersResponse], - ]: - r"""Return a callable for the list clusters method over gRPC. - - Lists information about clusters in an instance. - - Returns: - Callable[[~.ListClustersRequest], - Awaitable[~.ListClustersResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if "list_clusters" not in self._stubs: - self._stubs["list_clusters"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListClusters", - request_serializer=bigtable_instance_admin.ListClustersRequest.serialize, - response_deserializer=bigtable_instance_admin.ListClustersResponse.deserialize, - ) - return self._stubs["list_clusters"] - - @property - def update_cluster( - self, - ) -> Callable[[instance.Cluster], Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the update cluster method over gRPC. - - Updates a cluster within an instance. - - Note that UpdateCluster does not support updating - cluster_config.cluster_autoscaling_config. In order to update - it, you must use PartialUpdateCluster. - - Returns: - Callable[[~.Cluster], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "update_cluster" not in self._stubs: - self._stubs["update_cluster"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateCluster", - request_serializer=instance.Cluster.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs["update_cluster"] - - @property - def partial_update_cluster( - self, - ) -> Callable[ - [bigtable_instance_admin.PartialUpdateClusterRequest], - Awaitable[operations_pb2.Operation], - ]: - r"""Return a callable for the partial update cluster method over gRPC. - - Partially updates a cluster within a project. This method is the - preferred way to update a Cluster. - - To enable and update autoscaling, set - cluster_config.cluster_autoscaling_config. When autoscaling is - enabled, serve_nodes is treated as an OUTPUT_ONLY field, meaning - that updates to it are ignored. Note that an update cannot - simultaneously set serve_nodes to non-zero and - cluster_config.cluster_autoscaling_config to non-empty, and also - specify both in the update_mask. - - To disable autoscaling, clear - cluster_config.cluster_autoscaling_config, and explicitly set a - serve_node count via the update_mask. - - Returns: - Callable[[~.PartialUpdateClusterRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "partial_update_cluster" not in self._stubs: - self._stubs["partial_update_cluster"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateCluster", - request_serializer=bigtable_instance_admin.PartialUpdateClusterRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs["partial_update_cluster"] - - @property - def delete_cluster( - self, - ) -> Callable[ - [bigtable_instance_admin.DeleteClusterRequest], Awaitable[empty_pb2.Empty] - ]: - r"""Return a callable for the delete cluster method over gRPC. - - Deletes a cluster from an instance. - - Returns: - Callable[[~.DeleteClusterRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "delete_cluster" not in self._stubs: - self._stubs["delete_cluster"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteCluster", - request_serializer=bigtable_instance_admin.DeleteClusterRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs["delete_cluster"] - - @property - def create_app_profile( - self, - ) -> Callable[ - [bigtable_instance_admin.CreateAppProfileRequest], - Awaitable[instance.AppProfile], - ]: - r"""Return a callable for the create app profile method over gRPC. - - Creates an app profile within an instance. - - Returns: - Callable[[~.CreateAppProfileRequest], - Awaitable[~.AppProfile]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "create_app_profile" not in self._stubs: - self._stubs["create_app_profile"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateAppProfile", - request_serializer=bigtable_instance_admin.CreateAppProfileRequest.serialize, - response_deserializer=instance.AppProfile.deserialize, - ) - return self._stubs["create_app_profile"] - - @property - def get_app_profile( - self, - ) -> Callable[ - [bigtable_instance_admin.GetAppProfileRequest], Awaitable[instance.AppProfile] - ]: - r"""Return a callable for the get app profile method over gRPC. - - Gets information about an app profile. - - Returns: - Callable[[~.GetAppProfileRequest], - Awaitable[~.AppProfile]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "get_app_profile" not in self._stubs: - self._stubs["get_app_profile"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetAppProfile", - request_serializer=bigtable_instance_admin.GetAppProfileRequest.serialize, - response_deserializer=instance.AppProfile.deserialize, - ) - return self._stubs["get_app_profile"] - - @property - def list_app_profiles( - self, - ) -> Callable[ - [bigtable_instance_admin.ListAppProfilesRequest], - Awaitable[bigtable_instance_admin.ListAppProfilesResponse], - ]: - r"""Return a callable for the list app profiles method over gRPC. - - Lists information about app profiles in an instance. - - Returns: - Callable[[~.ListAppProfilesRequest], - Awaitable[~.ListAppProfilesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if "list_app_profiles" not in self._stubs: - self._stubs["list_app_profiles"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListAppProfiles", - request_serializer=bigtable_instance_admin.ListAppProfilesRequest.serialize, - response_deserializer=bigtable_instance_admin.ListAppProfilesResponse.deserialize, - ) - return self._stubs["list_app_profiles"] - - @property - def update_app_profile( - self, - ) -> Callable[ - [bigtable_instance_admin.UpdateAppProfileRequest], - Awaitable[operations_pb2.Operation], - ]: - r"""Return a callable for the update app profile method over gRPC. - - Updates an app profile within an instance. - - Returns: - Callable[[~.UpdateAppProfileRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "update_app_profile" not in self._stubs: - self._stubs["update_app_profile"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateAppProfile", - request_serializer=bigtable_instance_admin.UpdateAppProfileRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs["update_app_profile"] - - @property - def delete_app_profile( - self, - ) -> Callable[ - [bigtable_instance_admin.DeleteAppProfileRequest], Awaitable[empty_pb2.Empty] - ]: - r"""Return a callable for the delete app profile method over gRPC. - - Deletes an app profile from an instance. - - Returns: - Callable[[~.DeleteAppProfileRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "delete_app_profile" not in self._stubs: - self._stubs["delete_app_profile"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteAppProfile", - request_serializer=bigtable_instance_admin.DeleteAppProfileRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs["delete_app_profile"] - - @property - def get_iam_policy( - self, - ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], Awaitable[policy_pb2.Policy]]: - r"""Return a callable for the get iam policy method over gRPC. - - Gets the access control policy for an instance - resource. Returns an empty policy if an instance exists - but does not have a policy set. - - Returns: - Callable[[~.GetIamPolicyRequest], - Awaitable[~.Policy]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if "get_iam_policy" not in self._stubs: - self._stubs["get_iam_policy"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetIamPolicy", - request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, - response_deserializer=policy_pb2.Policy.FromString, - ) - return self._stubs["get_iam_policy"] - - @property - def set_iam_policy( - self, - ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], Awaitable[policy_pb2.Policy]]: - r"""Return a callable for the set iam policy method over gRPC. - - Sets the access control policy on an instance - resource. Replaces any existing policy. - - Returns: - Callable[[~.SetIamPolicyRequest], - Awaitable[~.Policy]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "set_iam_policy" not in self._stubs: - self._stubs["set_iam_policy"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/SetIamPolicy", - request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, - response_deserializer=policy_pb2.Policy.FromString, - ) - return self._stubs["set_iam_policy"] - - @property - def test_iam_permissions( - self, - ) -> Callable[ - [iam_policy_pb2.TestIamPermissionsRequest], - Awaitable[iam_policy_pb2.TestIamPermissionsResponse], - ]: - r"""Return a callable for the test iam permissions method over gRPC. - - Returns permissions that the caller has on the - specified instance resource. - - Returns: - Callable[[~.TestIamPermissionsRequest], - Awaitable[~.TestIamPermissionsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "test_iam_permissions" not in self._stubs: - self._stubs["test_iam_permissions"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/TestIamPermissions", - request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, - response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, - ) - return self._stubs["test_iam_permissions"] - - @property - def list_hot_tablets( - self, - ) -> Callable[ - [bigtable_instance_admin.ListHotTabletsRequest], - Awaitable[bigtable_instance_admin.ListHotTabletsResponse], - ]: - r"""Return a callable for the list hot tablets method over gRPC. - - Lists hot tablets in a cluster, within the time range - provided. Hot tablets are ordered based on CPU usage. - - Returns: - Callable[[~.ListHotTabletsRequest], - Awaitable[~.ListHotTabletsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if "list_hot_tablets" not in self._stubs: - self._stubs["list_hot_tablets"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListHotTablets", - request_serializer=bigtable_instance_admin.ListHotTabletsRequest.serialize, - response_deserializer=bigtable_instance_admin.ListHotTabletsResponse.deserialize, - ) - return self._stubs["list_hot_tablets"] - - @property - def create_logical_view( - self, - ) -> Callable[ - [bigtable_instance_admin.CreateLogicalViewRequest], - Awaitable[operations_pb2.Operation], - ]: - r"""Return a callable for the create logical view method over gRPC. - - Creates a logical view within an instance. - - Returns: - Callable[[~.CreateLogicalViewRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "create_logical_view" not in self._stubs: - self._stubs["create_logical_view"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateLogicalView", - request_serializer=bigtable_instance_admin.CreateLogicalViewRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs["create_logical_view"] - - @property - def get_logical_view( - self, - ) -> Callable[ - [bigtable_instance_admin.GetLogicalViewRequest], Awaitable[instance.LogicalView] - ]: - r"""Return a callable for the get logical view method over gRPC. - - Gets information about a logical view. - - Returns: - Callable[[~.GetLogicalViewRequest], - Awaitable[~.LogicalView]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "get_logical_view" not in self._stubs: - self._stubs["get_logical_view"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetLogicalView", - request_serializer=bigtable_instance_admin.GetLogicalViewRequest.serialize, - response_deserializer=instance.LogicalView.deserialize, - ) - return self._stubs["get_logical_view"] - - @property - def list_logical_views( - self, - ) -> Callable[ - [bigtable_instance_admin.ListLogicalViewsRequest], - Awaitable[bigtable_instance_admin.ListLogicalViewsResponse], - ]: - r"""Return a callable for the list logical views method over gRPC. - - Lists information about logical views in an instance. - - Returns: - Callable[[~.ListLogicalViewsRequest], - Awaitable[~.ListLogicalViewsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if "list_logical_views" not in self._stubs: - self._stubs["list_logical_views"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListLogicalViews", - request_serializer=bigtable_instance_admin.ListLogicalViewsRequest.serialize, - response_deserializer=bigtable_instance_admin.ListLogicalViewsResponse.deserialize, - ) - return self._stubs["list_logical_views"] - - @property - def update_logical_view( - self, - ) -> Callable[ - [bigtable_instance_admin.UpdateLogicalViewRequest], - Awaitable[operations_pb2.Operation], - ]: - r"""Return a callable for the update logical view method over gRPC. - - Updates a logical view within an instance. - - Returns: - Callable[[~.UpdateLogicalViewRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "update_logical_view" not in self._stubs: - self._stubs["update_logical_view"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateLogicalView", - request_serializer=bigtable_instance_admin.UpdateLogicalViewRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs["update_logical_view"] - - @property - def delete_logical_view( - self, - ) -> Callable[ - [bigtable_instance_admin.DeleteLogicalViewRequest], Awaitable[empty_pb2.Empty] - ]: - r"""Return a callable for the delete logical view method over gRPC. - - Deletes a logical view from an instance. - - Returns: - Callable[[~.DeleteLogicalViewRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "delete_logical_view" not in self._stubs: - self._stubs["delete_logical_view"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteLogicalView", - request_serializer=bigtable_instance_admin.DeleteLogicalViewRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs["delete_logical_view"] - - @property - def create_materialized_view( - self, - ) -> Callable[ - [bigtable_instance_admin.CreateMaterializedViewRequest], - Awaitable[operations_pb2.Operation], - ]: - r"""Return a callable for the create materialized view method over gRPC. - - Creates a materialized view within an instance. - - Returns: - Callable[[~.CreateMaterializedViewRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if "create_materialized_view" not in self._stubs: - self._stubs["create_materialized_view"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateMaterializedView", - request_serializer=bigtable_instance_admin.CreateMaterializedViewRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs["create_materialized_view"] - - @property - def get_materialized_view( - self, - ) -> Callable[ - [bigtable_instance_admin.GetMaterializedViewRequest], - Awaitable[instance.MaterializedView], - ]: - r"""Return a callable for the get materialized view method over gRPC. - - Gets information about a materialized view. - - Returns: - Callable[[~.GetMaterializedViewRequest], - Awaitable[~.MaterializedView]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "get_materialized_view" not in self._stubs: - self._stubs["get_materialized_view"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetMaterializedView", - request_serializer=bigtable_instance_admin.GetMaterializedViewRequest.serialize, - response_deserializer=instance.MaterializedView.deserialize, - ) - return self._stubs["get_materialized_view"] - - @property - def list_materialized_views( - self, - ) -> Callable[ - [bigtable_instance_admin.ListMaterializedViewsRequest], - Awaitable[bigtable_instance_admin.ListMaterializedViewsResponse], - ]: - r"""Return a callable for the list materialized views method over gRPC. - - Lists information about materialized views in an - instance. - - Returns: - Callable[[~.ListMaterializedViewsRequest], - Awaitable[~.ListMaterializedViewsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "list_materialized_views" not in self._stubs: - self._stubs["list_materialized_views"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListMaterializedViews", - request_serializer=bigtable_instance_admin.ListMaterializedViewsRequest.serialize, - response_deserializer=bigtable_instance_admin.ListMaterializedViewsResponse.deserialize, - ) - return self._stubs["list_materialized_views"] - - @property - def update_materialized_view( - self, - ) -> Callable[ - [bigtable_instance_admin.UpdateMaterializedViewRequest], - Awaitable[operations_pb2.Operation], - ]: - r"""Return a callable for the update materialized view method over gRPC. - - Updates a materialized view within an instance. - - Returns: - Callable[[~.UpdateMaterializedViewRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if "update_materialized_view" not in self._stubs: - self._stubs["update_materialized_view"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateMaterializedView", - request_serializer=bigtable_instance_admin.UpdateMaterializedViewRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs["update_materialized_view"] - - @property - def delete_materialized_view( - self, - ) -> Callable[ - [bigtable_instance_admin.DeleteMaterializedViewRequest], - Awaitable[empty_pb2.Empty], - ]: - r"""Return a callable for the delete materialized view method over gRPC. - - Deletes a materialized view from an instance. - - Returns: - Callable[[~.DeleteMaterializedViewRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "delete_materialized_view" not in self._stubs: - self._stubs["delete_materialized_view"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteMaterializedView", - request_serializer=bigtable_instance_admin.DeleteMaterializedViewRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs["delete_materialized_view"] - - def _prep_wrapped_messages(self, client_info): - """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" - self._wrapped_methods = { - self.create_instance: self._wrap_method( - self.create_instance, - default_timeout=300.0, - client_info=client_info, - ), - self.get_instance: self._wrap_method( - self.get_instance, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.list_instances: self._wrap_method( - self.list_instances, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.update_instance: self._wrap_method( - self.update_instance, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.partial_update_instance: self._wrap_method( - self.partial_update_instance, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.delete_instance: self._wrap_method( - self.delete_instance, - default_timeout=60.0, - client_info=client_info, - ), - self.create_cluster: self._wrap_method( - self.create_cluster, - default_timeout=60.0, - client_info=client_info, - ), - self.get_cluster: self._wrap_method( - self.get_cluster, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=60.0, - multiplier=2, - 
predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.list_clusters: self._wrap_method( - self.list_clusters, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.update_cluster: self._wrap_method( - self.update_cluster, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.partial_update_cluster: self._wrap_method( - self.partial_update_cluster, - default_timeout=None, - client_info=client_info, - ), - self.delete_cluster: self._wrap_method( - self.delete_cluster, - default_timeout=60.0, - client_info=client_info, - ), - self.create_app_profile: self._wrap_method( - self.create_app_profile, - default_timeout=60.0, - client_info=client_info, - ), - self.get_app_profile: self._wrap_method( - self.get_app_profile, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.list_app_profiles: self._wrap_method( - self.list_app_profiles, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.update_app_profile: self._wrap_method( - self.update_app_profile, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.delete_app_profile: self._wrap_method( - self.delete_app_profile, - default_timeout=60.0, - client_info=client_info, - ), - self.get_iam_policy: self._wrap_method( - self.get_iam_policy, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.set_iam_policy: self._wrap_method( - self.set_iam_policy, - default_timeout=60.0, - client_info=client_info, - ), - self.test_iam_permissions: self._wrap_method( - self.test_iam_permissions, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.list_hot_tablets: self._wrap_method( - self.list_hot_tablets, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - 
), - default_timeout=60.0, - client_info=client_info, - ), - self.create_logical_view: self._wrap_method( - self.create_logical_view, - default_timeout=None, - client_info=client_info, - ), - self.get_logical_view: self._wrap_method( - self.get_logical_view, - default_timeout=None, - client_info=client_info, - ), - self.list_logical_views: self._wrap_method( - self.list_logical_views, - default_timeout=None, - client_info=client_info, - ), - self.update_logical_view: self._wrap_method( - self.update_logical_view, - default_timeout=None, - client_info=client_info, - ), - self.delete_logical_view: self._wrap_method( - self.delete_logical_view, - default_timeout=None, - client_info=client_info, - ), - self.create_materialized_view: self._wrap_method( - self.create_materialized_view, - default_timeout=None, - client_info=client_info, - ), - self.get_materialized_view: self._wrap_method( - self.get_materialized_view, - default_timeout=None, - client_info=client_info, - ), - self.list_materialized_views: self._wrap_method( - self.list_materialized_views, - default_timeout=None, - client_info=client_info, - ), - self.update_materialized_view: self._wrap_method( - self.update_materialized_view, - default_timeout=None, - client_info=client_info, - ), - self.delete_materialized_view: self._wrap_method( - self.delete_materialized_view, - default_timeout=None, - client_info=client_info, - ), - } - - def _wrap_method(self, func, *args, **kwargs): - if self._wrap_with_kind: # pragma: NO COVER - kwargs["kind"] = self.kind - return gapic_v1.method_async.wrap_method(func, *args, **kwargs) - - def close(self): - return self._logged_channel.close() - - @property - def kind(self) -> str: - return "grpc_asyncio" - - -__all__ = ("BigtableInstanceAdminGrpcAsyncIOTransport",) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py deleted file mode 100644 index 12af0792b..000000000 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py +++ /dev/null @@ -1,6824 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import logging -import json # type: ignore - -from google.auth.transport.requests import AuthorizedSession # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.api_core import exceptions as core_exceptions -from google.api_core import retry as retries -from google.api_core import rest_helpers -from google.api_core import rest_streaming -from google.api_core import gapic_v1 -import google.protobuf - -from google.protobuf import json_format -from google.api_core import operations_v1 - -from requests import __version__ as requests_version -import dataclasses -from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union -import warnings - - -from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin -from google.cloud.bigtable_admin_v2.types import instance -from google.iam.v1 import iam_policy_pb2 # type: ignore -from google.iam.v1 import policy_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.longrunning import operations_pb2 # type: ignore - - -from .rest_base import _BaseBigtableInstanceAdminRestTransport -from .base import DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object, None] # type: ignore - -try: - from google.api_core import client_logging # type: ignore - - CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER -except ImportError: # pragma: NO COVER - CLIENT_LOGGING_SUPPORTED = False - -_LOGGER = logging.getLogger(__name__) - -DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, - grpc_version=None, - rest_version=f"requests@{requests_version}", -) - -if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER - DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ - - -class BigtableInstanceAdminRestInterceptor: - """Interceptor for BigtableInstanceAdmin. - - Interceptors are used to manipulate requests, request metadata, and responses - in arbitrary ways. - Example use cases include: - * Logging - * Verifying requests according to service or custom semantics - * Stripping extraneous information from responses - - These use cases and more can be enabled by injecting an - instance of a custom subclass when constructing the BigtableInstanceAdminRestTransport. - - .. 
code-block:: python - class MyCustomBigtableInstanceAdminInterceptor(BigtableInstanceAdminRestInterceptor): - def pre_create_app_profile(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_create_app_profile(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_create_cluster(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_create_cluster(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_create_instance(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_create_instance(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_create_logical_view(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_create_logical_view(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_create_materialized_view(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_create_materialized_view(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_delete_app_profile(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def pre_delete_cluster(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def pre_delete_instance(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def pre_delete_logical_view(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def pre_delete_materialized_view(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def pre_get_app_profile(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_get_app_profile(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_get_cluster(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_get_cluster(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_get_iam_policy(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_get_iam_policy(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_get_instance(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_get_instance(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_get_logical_view(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_get_logical_view(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_get_materialized_view(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_get_materialized_view(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_list_app_profiles(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def 
post_list_app_profiles(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_list_clusters(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_list_clusters(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_list_hot_tablets(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_list_hot_tablets(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_list_instances(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_list_instances(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_list_logical_views(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_list_logical_views(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_list_materialized_views(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_list_materialized_views(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_partial_update_cluster(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_partial_update_cluster(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_partial_update_instance(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_partial_update_instance(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_set_iam_policy(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_set_iam_policy(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_test_iam_permissions(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_test_iam_permissions(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_update_app_profile(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_update_app_profile(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_update_cluster(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_update_cluster(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_update_instance(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_update_instance(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_update_logical_view(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_update_logical_view(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_update_materialized_view(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_update_materialized_view(self, response): - logging.log(f"Received response: {response}") - return response - - transport = 
BigtableInstanceAdminRestTransport(interceptor=MyCustomBigtableInstanceAdminInterceptor()) - client = BigtableInstanceAdminClient(transport=transport) - - - """ - - def pre_create_app_profile( - self, - request: bigtable_instance_admin.CreateAppProfileRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_instance_admin.CreateAppProfileRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for create_app_profile - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. - """ - return request, metadata - - def post_create_app_profile( - self, response: instance.AppProfile - ) -> instance.AppProfile: - """Post-rpc interceptor for create_app_profile - - DEPRECATED. Please use the `post_create_app_profile_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. This `post_create_app_profile` interceptor runs - before the `post_create_app_profile_with_metadata` interceptor. - """ - return response - - def post_create_app_profile_with_metadata( - self, - response: instance.AppProfile, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[instance.AppProfile, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for create_app_profile - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableInstanceAdmin server but before it is returned to user code. - - We recommend only using this `post_create_app_profile_with_metadata` - interceptor in new development instead of the `post_create_app_profile` interceptor. - When both interceptors are used, this `post_create_app_profile_with_metadata` interceptor runs after the - `post_create_app_profile` interceptor. The (possibly modified) response returned by - `post_create_app_profile` will be passed to - `post_create_app_profile_with_metadata`. - """ - return response, metadata - - def pre_create_cluster( - self, - request: bigtable_instance_admin.CreateClusterRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_instance_admin.CreateClusterRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for create_cluster - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. - """ - return request, metadata - - def post_create_cluster( - self, response: operations_pb2.Operation - ) -> operations_pb2.Operation: - """Post-rpc interceptor for create_cluster - - DEPRECATED. Please use the `post_create_cluster_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. This `post_create_cluster` interceptor runs - before the `post_create_cluster_with_metadata` interceptor. - """ - return response - - def post_create_cluster_with_metadata( - self, - response: operations_pb2.Operation, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for create_cluster - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableInstanceAdmin server but before it is returned to user code. 
- - We recommend only using this `post_create_cluster_with_metadata` - interceptor in new development instead of the `post_create_cluster` interceptor. - When both interceptors are used, this `post_create_cluster_with_metadata` interceptor runs after the - `post_create_cluster` interceptor. The (possibly modified) response returned by - `post_create_cluster` will be passed to - `post_create_cluster_with_metadata`. - """ - return response, metadata - - def pre_create_instance( - self, - request: bigtable_instance_admin.CreateInstanceRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_instance_admin.CreateInstanceRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for create_instance - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. - """ - return request, metadata - - def post_create_instance( - self, response: operations_pb2.Operation - ) -> operations_pb2.Operation: - """Post-rpc interceptor for create_instance - - DEPRECATED. Please use the `post_create_instance_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. This `post_create_instance` interceptor runs - before the `post_create_instance_with_metadata` interceptor. - """ - return response - - def post_create_instance_with_metadata( - self, - response: operations_pb2.Operation, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for create_instance - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableInstanceAdmin server but before it is returned to user code. - - We recommend only using this `post_create_instance_with_metadata` - interceptor in new development instead of the `post_create_instance` interceptor. - When both interceptors are used, this `post_create_instance_with_metadata` interceptor runs after the - `post_create_instance` interceptor. The (possibly modified) response returned by - `post_create_instance` will be passed to - `post_create_instance_with_metadata`. - """ - return response, metadata - - def pre_create_logical_view( - self, - request: bigtable_instance_admin.CreateLogicalViewRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_instance_admin.CreateLogicalViewRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for create_logical_view - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. - """ - return request, metadata - - def post_create_logical_view( - self, response: operations_pb2.Operation - ) -> operations_pb2.Operation: - """Post-rpc interceptor for create_logical_view - - DEPRECATED. Please use the `post_create_logical_view_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. This `post_create_logical_view` interceptor runs - before the `post_create_logical_view_with_metadata` interceptor. 
- """ - return response - - def post_create_logical_view_with_metadata( - self, - response: operations_pb2.Operation, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for create_logical_view - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableInstanceAdmin server but before it is returned to user code. - - We recommend only using this `post_create_logical_view_with_metadata` - interceptor in new development instead of the `post_create_logical_view` interceptor. - When both interceptors are used, this `post_create_logical_view_with_metadata` interceptor runs after the - `post_create_logical_view` interceptor. The (possibly modified) response returned by - `post_create_logical_view` will be passed to - `post_create_logical_view_with_metadata`. - """ - return response, metadata - - def pre_create_materialized_view( - self, - request: bigtable_instance_admin.CreateMaterializedViewRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_instance_admin.CreateMaterializedViewRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for create_materialized_view - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. - """ - return request, metadata - - def post_create_materialized_view( - self, response: operations_pb2.Operation - ) -> operations_pb2.Operation: - """Post-rpc interceptor for create_materialized_view - - DEPRECATED. Please use the `post_create_materialized_view_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. This `post_create_materialized_view` interceptor runs - before the `post_create_materialized_view_with_metadata` interceptor. - """ - return response - - def post_create_materialized_view_with_metadata( - self, - response: operations_pb2.Operation, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for create_materialized_view - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableInstanceAdmin server but before it is returned to user code. - - We recommend only using this `post_create_materialized_view_with_metadata` - interceptor in new development instead of the `post_create_materialized_view` interceptor. - When both interceptors are used, this `post_create_materialized_view_with_metadata` interceptor runs after the - `post_create_materialized_view` interceptor. The (possibly modified) response returned by - `post_create_materialized_view` will be passed to - `post_create_materialized_view_with_metadata`. - """ - return response, metadata - - def pre_delete_app_profile( - self, - request: bigtable_instance_admin.DeleteAppProfileRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_instance_admin.DeleteAppProfileRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for delete_app_profile - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. 
- """ - return request, metadata - - def pre_delete_cluster( - self, - request: bigtable_instance_admin.DeleteClusterRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_instance_admin.DeleteClusterRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for delete_cluster - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. - """ - return request, metadata - - def pre_delete_instance( - self, - request: bigtable_instance_admin.DeleteInstanceRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_instance_admin.DeleteInstanceRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for delete_instance - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. - """ - return request, metadata - - def pre_delete_logical_view( - self, - request: bigtable_instance_admin.DeleteLogicalViewRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_instance_admin.DeleteLogicalViewRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for delete_logical_view - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. - """ - return request, metadata - - def pre_delete_materialized_view( - self, - request: bigtable_instance_admin.DeleteMaterializedViewRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_instance_admin.DeleteMaterializedViewRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for delete_materialized_view - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. - """ - return request, metadata - - def pre_get_app_profile( - self, - request: bigtable_instance_admin.GetAppProfileRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_instance_admin.GetAppProfileRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for get_app_profile - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. - """ - return request, metadata - - def post_get_app_profile( - self, response: instance.AppProfile - ) -> instance.AppProfile: - """Post-rpc interceptor for get_app_profile - - DEPRECATED. Please use the `post_get_app_profile_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. This `post_get_app_profile` interceptor runs - before the `post_get_app_profile_with_metadata` interceptor. - """ - return response - - def post_get_app_profile_with_metadata( - self, - response: instance.AppProfile, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[instance.AppProfile, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for get_app_profile - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableInstanceAdmin server but before it is returned to user code. - - We recommend only using this `post_get_app_profile_with_metadata` - interceptor in new development instead of the `post_get_app_profile` interceptor. 
- When both interceptors are used, this `post_get_app_profile_with_metadata` interceptor runs after the - `post_get_app_profile` interceptor. The (possibly modified) response returned by - `post_get_app_profile` will be passed to - `post_get_app_profile_with_metadata`. - """ - return response, metadata - - def pre_get_cluster( - self, - request: bigtable_instance_admin.GetClusterRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_instance_admin.GetClusterRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for get_cluster - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. - """ - return request, metadata - - def post_get_cluster(self, response: instance.Cluster) -> instance.Cluster: - """Post-rpc interceptor for get_cluster - - DEPRECATED. Please use the `post_get_cluster_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. This `post_get_cluster` interceptor runs - before the `post_get_cluster_with_metadata` interceptor. - """ - return response - - def post_get_cluster_with_metadata( - self, - response: instance.Cluster, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[instance.Cluster, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for get_cluster - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableInstanceAdmin server but before it is returned to user code. - - We recommend only using this `post_get_cluster_with_metadata` - interceptor in new development instead of the `post_get_cluster` interceptor. - When both interceptors are used, this `post_get_cluster_with_metadata` interceptor runs after the - `post_get_cluster` interceptor. The (possibly modified) response returned by - `post_get_cluster` will be passed to - `post_get_cluster_with_metadata`. - """ - return response, metadata - - def pre_get_iam_policy( - self, - request: iam_policy_pb2.GetIamPolicyRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - iam_policy_pb2.GetIamPolicyRequest, Sequence[Tuple[str, Union[str, bytes]]] - ]: - """Pre-rpc interceptor for get_iam_policy - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. - """ - return request, metadata - - def post_get_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: - """Post-rpc interceptor for get_iam_policy - - DEPRECATED. Please use the `post_get_iam_policy_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. This `post_get_iam_policy` interceptor runs - before the `post_get_iam_policy_with_metadata` interceptor. - """ - return response - - def post_get_iam_policy_with_metadata( - self, - response: policy_pb2.Policy, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[policy_pb2.Policy, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for get_iam_policy - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableInstanceAdmin server but before it is returned to user code. 
- - We recommend only using this `post_get_iam_policy_with_metadata` - interceptor in new development instead of the `post_get_iam_policy` interceptor. - When both interceptors are used, this `post_get_iam_policy_with_metadata` interceptor runs after the - `post_get_iam_policy` interceptor. The (possibly modified) response returned by - `post_get_iam_policy` will be passed to - `post_get_iam_policy_with_metadata`. - """ - return response, metadata - - def pre_get_instance( - self, - request: bigtable_instance_admin.GetInstanceRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_instance_admin.GetInstanceRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for get_instance - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. - """ - return request, metadata - - def post_get_instance(self, response: instance.Instance) -> instance.Instance: - """Post-rpc interceptor for get_instance - - DEPRECATED. Please use the `post_get_instance_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. This `post_get_instance` interceptor runs - before the `post_get_instance_with_metadata` interceptor. - """ - return response - - def post_get_instance_with_metadata( - self, - response: instance.Instance, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[instance.Instance, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for get_instance - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableInstanceAdmin server but before it is returned to user code. - - We recommend only using this `post_get_instance_with_metadata` - interceptor in new development instead of the `post_get_instance` interceptor. - When both interceptors are used, this `post_get_instance_with_metadata` interceptor runs after the - `post_get_instance` interceptor. The (possibly modified) response returned by - `post_get_instance` will be passed to - `post_get_instance_with_metadata`. - """ - return response, metadata - - def pre_get_logical_view( - self, - request: bigtable_instance_admin.GetLogicalViewRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_instance_admin.GetLogicalViewRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for get_logical_view - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. - """ - return request, metadata - - def post_get_logical_view( - self, response: instance.LogicalView - ) -> instance.LogicalView: - """Post-rpc interceptor for get_logical_view - - DEPRECATED. Please use the `post_get_logical_view_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. This `post_get_logical_view` interceptor runs - before the `post_get_logical_view_with_metadata` interceptor. 
- """ - return response - - def post_get_logical_view_with_metadata( - self, - response: instance.LogicalView, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[instance.LogicalView, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for get_logical_view - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableInstanceAdmin server but before it is returned to user code. - - We recommend only using this `post_get_logical_view_with_metadata` - interceptor in new development instead of the `post_get_logical_view` interceptor. - When both interceptors are used, this `post_get_logical_view_with_metadata` interceptor runs after the - `post_get_logical_view` interceptor. The (possibly modified) response returned by - `post_get_logical_view` will be passed to - `post_get_logical_view_with_metadata`. - """ - return response, metadata - - def pre_get_materialized_view( - self, - request: bigtable_instance_admin.GetMaterializedViewRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_instance_admin.GetMaterializedViewRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for get_materialized_view - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. - """ - return request, metadata - - def post_get_materialized_view( - self, response: instance.MaterializedView - ) -> instance.MaterializedView: - """Post-rpc interceptor for get_materialized_view - - DEPRECATED. Please use the `post_get_materialized_view_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. This `post_get_materialized_view` interceptor runs - before the `post_get_materialized_view_with_metadata` interceptor. - """ - return response - - def post_get_materialized_view_with_metadata( - self, - response: instance.MaterializedView, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[instance.MaterializedView, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for get_materialized_view - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableInstanceAdmin server but before it is returned to user code. - - We recommend only using this `post_get_materialized_view_with_metadata` - interceptor in new development instead of the `post_get_materialized_view` interceptor. - When both interceptors are used, this `post_get_materialized_view_with_metadata` interceptor runs after the - `post_get_materialized_view` interceptor. The (possibly modified) response returned by - `post_get_materialized_view` will be passed to - `post_get_materialized_view_with_metadata`. - """ - return response, metadata - - def pre_list_app_profiles( - self, - request: bigtable_instance_admin.ListAppProfilesRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_instance_admin.ListAppProfilesRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for list_app_profiles - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. 
- """ - return request, metadata - - def post_list_app_profiles( - self, response: bigtable_instance_admin.ListAppProfilesResponse - ) -> bigtable_instance_admin.ListAppProfilesResponse: - """Post-rpc interceptor for list_app_profiles - - DEPRECATED. Please use the `post_list_app_profiles_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. This `post_list_app_profiles` interceptor runs - before the `post_list_app_profiles_with_metadata` interceptor. - """ - return response - - def post_list_app_profiles_with_metadata( - self, - response: bigtable_instance_admin.ListAppProfilesResponse, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_instance_admin.ListAppProfilesResponse, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Post-rpc interceptor for list_app_profiles - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableInstanceAdmin server but before it is returned to user code. - - We recommend only using this `post_list_app_profiles_with_metadata` - interceptor in new development instead of the `post_list_app_profiles` interceptor. - When both interceptors are used, this `post_list_app_profiles_with_metadata` interceptor runs after the - `post_list_app_profiles` interceptor. The (possibly modified) response returned by - `post_list_app_profiles` will be passed to - `post_list_app_profiles_with_metadata`. - """ - return response, metadata - - def pre_list_clusters( - self, - request: bigtable_instance_admin.ListClustersRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_instance_admin.ListClustersRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for list_clusters - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. - """ - return request, metadata - - def post_list_clusters( - self, response: bigtable_instance_admin.ListClustersResponse - ) -> bigtable_instance_admin.ListClustersResponse: - """Post-rpc interceptor for list_clusters - - DEPRECATED. Please use the `post_list_clusters_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. This `post_list_clusters` interceptor runs - before the `post_list_clusters_with_metadata` interceptor. - """ - return response - - def post_list_clusters_with_metadata( - self, - response: bigtable_instance_admin.ListClustersResponse, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_instance_admin.ListClustersResponse, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Post-rpc interceptor for list_clusters - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableInstanceAdmin server but before it is returned to user code. - - We recommend only using this `post_list_clusters_with_metadata` - interceptor in new development instead of the `post_list_clusters` interceptor. - When both interceptors are used, this `post_list_clusters_with_metadata` interceptor runs after the - `post_list_clusters` interceptor. The (possibly modified) response returned by - `post_list_clusters` will be passed to - `post_list_clusters_with_metadata`. 
- """ - return response, metadata - - def pre_list_hot_tablets( - self, - request: bigtable_instance_admin.ListHotTabletsRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_instance_admin.ListHotTabletsRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for list_hot_tablets - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. - """ - return request, metadata - - def post_list_hot_tablets( - self, response: bigtable_instance_admin.ListHotTabletsResponse - ) -> bigtable_instance_admin.ListHotTabletsResponse: - """Post-rpc interceptor for list_hot_tablets - - DEPRECATED. Please use the `post_list_hot_tablets_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. This `post_list_hot_tablets` interceptor runs - before the `post_list_hot_tablets_with_metadata` interceptor. - """ - return response - - def post_list_hot_tablets_with_metadata( - self, - response: bigtable_instance_admin.ListHotTabletsResponse, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_instance_admin.ListHotTabletsResponse, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Post-rpc interceptor for list_hot_tablets - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableInstanceAdmin server but before it is returned to user code. - - We recommend only using this `post_list_hot_tablets_with_metadata` - interceptor in new development instead of the `post_list_hot_tablets` interceptor. - When both interceptors are used, this `post_list_hot_tablets_with_metadata` interceptor runs after the - `post_list_hot_tablets` interceptor. The (possibly modified) response returned by - `post_list_hot_tablets` will be passed to - `post_list_hot_tablets_with_metadata`. - """ - return response, metadata - - def pre_list_instances( - self, - request: bigtable_instance_admin.ListInstancesRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_instance_admin.ListInstancesRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for list_instances - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. - """ - return request, metadata - - def post_list_instances( - self, response: bigtable_instance_admin.ListInstancesResponse - ) -> bigtable_instance_admin.ListInstancesResponse: - """Post-rpc interceptor for list_instances - - DEPRECATED. Please use the `post_list_instances_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. This `post_list_instances` interceptor runs - before the `post_list_instances_with_metadata` interceptor. 
- """ - return response - - def post_list_instances_with_metadata( - self, - response: bigtable_instance_admin.ListInstancesResponse, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_instance_admin.ListInstancesResponse, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Post-rpc interceptor for list_instances - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableInstanceAdmin server but before it is returned to user code. - - We recommend only using this `post_list_instances_with_metadata` - interceptor in new development instead of the `post_list_instances` interceptor. - When both interceptors are used, this `post_list_instances_with_metadata` interceptor runs after the - `post_list_instances` interceptor. The (possibly modified) response returned by - `post_list_instances` will be passed to - `post_list_instances_with_metadata`. - """ - return response, metadata - - def pre_list_logical_views( - self, - request: bigtable_instance_admin.ListLogicalViewsRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_instance_admin.ListLogicalViewsRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for list_logical_views - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. - """ - return request, metadata - - def post_list_logical_views( - self, response: bigtable_instance_admin.ListLogicalViewsResponse - ) -> bigtable_instance_admin.ListLogicalViewsResponse: - """Post-rpc interceptor for list_logical_views - - DEPRECATED. Please use the `post_list_logical_views_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. This `post_list_logical_views` interceptor runs - before the `post_list_logical_views_with_metadata` interceptor. - """ - return response - - def post_list_logical_views_with_metadata( - self, - response: bigtable_instance_admin.ListLogicalViewsResponse, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_instance_admin.ListLogicalViewsResponse, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Post-rpc interceptor for list_logical_views - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableInstanceAdmin server but before it is returned to user code. - - We recommend only using this `post_list_logical_views_with_metadata` - interceptor in new development instead of the `post_list_logical_views` interceptor. - When both interceptors are used, this `post_list_logical_views_with_metadata` interceptor runs after the - `post_list_logical_views` interceptor. The (possibly modified) response returned by - `post_list_logical_views` will be passed to - `post_list_logical_views_with_metadata`. - """ - return response, metadata - - def pre_list_materialized_views( - self, - request: bigtable_instance_admin.ListMaterializedViewsRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_instance_admin.ListMaterializedViewsRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for list_materialized_views - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. 
- """ - return request, metadata - - def post_list_materialized_views( - self, response: bigtable_instance_admin.ListMaterializedViewsResponse - ) -> bigtable_instance_admin.ListMaterializedViewsResponse: - """Post-rpc interceptor for list_materialized_views - - DEPRECATED. Please use the `post_list_materialized_views_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. This `post_list_materialized_views` interceptor runs - before the `post_list_materialized_views_with_metadata` interceptor. - """ - return response - - def post_list_materialized_views_with_metadata( - self, - response: bigtable_instance_admin.ListMaterializedViewsResponse, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_instance_admin.ListMaterializedViewsResponse, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Post-rpc interceptor for list_materialized_views - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableInstanceAdmin server but before it is returned to user code. - - We recommend only using this `post_list_materialized_views_with_metadata` - interceptor in new development instead of the `post_list_materialized_views` interceptor. - When both interceptors are used, this `post_list_materialized_views_with_metadata` interceptor runs after the - `post_list_materialized_views` interceptor. The (possibly modified) response returned by - `post_list_materialized_views` will be passed to - `post_list_materialized_views_with_metadata`. - """ - return response, metadata - - def pre_partial_update_cluster( - self, - request: bigtable_instance_admin.PartialUpdateClusterRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_instance_admin.PartialUpdateClusterRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for partial_update_cluster - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. - """ - return request, metadata - - def post_partial_update_cluster( - self, response: operations_pb2.Operation - ) -> operations_pb2.Operation: - """Post-rpc interceptor for partial_update_cluster - - DEPRECATED. Please use the `post_partial_update_cluster_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. This `post_partial_update_cluster` interceptor runs - before the `post_partial_update_cluster_with_metadata` interceptor. - """ - return response - - def post_partial_update_cluster_with_metadata( - self, - response: operations_pb2.Operation, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for partial_update_cluster - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableInstanceAdmin server but before it is returned to user code. - - We recommend only using this `post_partial_update_cluster_with_metadata` - interceptor in new development instead of the `post_partial_update_cluster` interceptor. - When both interceptors are used, this `post_partial_update_cluster_with_metadata` interceptor runs after the - `post_partial_update_cluster` interceptor. 
The (possibly modified) response returned by - `post_partial_update_cluster` will be passed to - `post_partial_update_cluster_with_metadata`. - """ - return response, metadata - - def pre_partial_update_instance( - self, - request: bigtable_instance_admin.PartialUpdateInstanceRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_instance_admin.PartialUpdateInstanceRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for partial_update_instance - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. - """ - return request, metadata - - def post_partial_update_instance( - self, response: operations_pb2.Operation - ) -> operations_pb2.Operation: - """Post-rpc interceptor for partial_update_instance - - DEPRECATED. Please use the `post_partial_update_instance_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. This `post_partial_update_instance` interceptor runs - before the `post_partial_update_instance_with_metadata` interceptor. - """ - return response - - def post_partial_update_instance_with_metadata( - self, - response: operations_pb2.Operation, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for partial_update_instance - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableInstanceAdmin server but before it is returned to user code. - - We recommend only using this `post_partial_update_instance_with_metadata` - interceptor in new development instead of the `post_partial_update_instance` interceptor. - When both interceptors are used, this `post_partial_update_instance_with_metadata` interceptor runs after the - `post_partial_update_instance` interceptor. The (possibly modified) response returned by - `post_partial_update_instance` will be passed to - `post_partial_update_instance_with_metadata`. - """ - return response, metadata - - def pre_set_iam_policy( - self, - request: iam_policy_pb2.SetIamPolicyRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - iam_policy_pb2.SetIamPolicyRequest, Sequence[Tuple[str, Union[str, bytes]]] - ]: - """Pre-rpc interceptor for set_iam_policy - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. - """ - return request, metadata - - def post_set_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: - """Post-rpc interceptor for set_iam_policy - - DEPRECATED. Please use the `post_set_iam_policy_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. This `post_set_iam_policy` interceptor runs - before the `post_set_iam_policy_with_metadata` interceptor. 
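# --- Illustrative sketch: a pre-RPC hook of the kind documented above,
# tagging every SetIamPolicy call with one extra metadata entry before
# it reaches the server. The header name is made up for the example.
from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.rest import (
    BigtableInstanceAdminRestInterceptor,
)


class TaggingInterceptor(BigtableInstanceAdminRestInterceptor):
    def pre_set_iam_policy(self, request, metadata):
        # Append an example header; keep the original entries intact.
        metadata = tuple(metadata) + (("x-example-audit-tag", "admin-script"),)
        return request, metadata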
- """ - return response - - def post_set_iam_policy_with_metadata( - self, - response: policy_pb2.Policy, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[policy_pb2.Policy, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for set_iam_policy - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableInstanceAdmin server but before it is returned to user code. - - We recommend only using this `post_set_iam_policy_with_metadata` - interceptor in new development instead of the `post_set_iam_policy` interceptor. - When both interceptors are used, this `post_set_iam_policy_with_metadata` interceptor runs after the - `post_set_iam_policy` interceptor. The (possibly modified) response returned by - `post_set_iam_policy` will be passed to - `post_set_iam_policy_with_metadata`. - """ - return response, metadata - - def pre_test_iam_permissions( - self, - request: iam_policy_pb2.TestIamPermissionsRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - iam_policy_pb2.TestIamPermissionsRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for test_iam_permissions - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. - """ - return request, metadata - - def post_test_iam_permissions( - self, response: iam_policy_pb2.TestIamPermissionsResponse - ) -> iam_policy_pb2.TestIamPermissionsResponse: - """Post-rpc interceptor for test_iam_permissions - - DEPRECATED. Please use the `post_test_iam_permissions_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. This `post_test_iam_permissions` interceptor runs - before the `post_test_iam_permissions_with_metadata` interceptor. - """ - return response - - def post_test_iam_permissions_with_metadata( - self, - response: iam_policy_pb2.TestIamPermissionsResponse, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - iam_policy_pb2.TestIamPermissionsResponse, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Post-rpc interceptor for test_iam_permissions - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableInstanceAdmin server but before it is returned to user code. - - We recommend only using this `post_test_iam_permissions_with_metadata` - interceptor in new development instead of the `post_test_iam_permissions` interceptor. - When both interceptors are used, this `post_test_iam_permissions_with_metadata` interceptor runs after the - `post_test_iam_permissions` interceptor. The (possibly modified) response returned by - `post_test_iam_permissions` will be passed to - `post_test_iam_permissions_with_metadata`. - """ - return response, metadata - - def pre_update_app_profile( - self, - request: bigtable_instance_admin.UpdateAppProfileRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_instance_admin.UpdateAppProfileRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for update_app_profile - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. 
- """ - return request, metadata - - def post_update_app_profile( - self, response: operations_pb2.Operation - ) -> operations_pb2.Operation: - """Post-rpc interceptor for update_app_profile - - DEPRECATED. Please use the `post_update_app_profile_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. This `post_update_app_profile` interceptor runs - before the `post_update_app_profile_with_metadata` interceptor. - """ - return response - - def post_update_app_profile_with_metadata( - self, - response: operations_pb2.Operation, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for update_app_profile - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableInstanceAdmin server but before it is returned to user code. - - We recommend only using this `post_update_app_profile_with_metadata` - interceptor in new development instead of the `post_update_app_profile` interceptor. - When both interceptors are used, this `post_update_app_profile_with_metadata` interceptor runs after the - `post_update_app_profile` interceptor. The (possibly modified) response returned by - `post_update_app_profile` will be passed to - `post_update_app_profile_with_metadata`. - """ - return response, metadata - - def pre_update_cluster( - self, - request: instance.Cluster, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[instance.Cluster, Sequence[Tuple[str, Union[str, bytes]]]]: - """Pre-rpc interceptor for update_cluster - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. - """ - return request, metadata - - def post_update_cluster( - self, response: operations_pb2.Operation - ) -> operations_pb2.Operation: - """Post-rpc interceptor for update_cluster - - DEPRECATED. Please use the `post_update_cluster_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. This `post_update_cluster` interceptor runs - before the `post_update_cluster_with_metadata` interceptor. - """ - return response - - def post_update_cluster_with_metadata( - self, - response: operations_pb2.Operation, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for update_cluster - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableInstanceAdmin server but before it is returned to user code. - - We recommend only using this `post_update_cluster_with_metadata` - interceptor in new development instead of the `post_update_cluster` interceptor. - When both interceptors are used, this `post_update_cluster_with_metadata` interceptor runs after the - `post_update_cluster` interceptor. The (possibly modified) response returned by - `post_update_cluster` will be passed to - `post_update_cluster_with_metadata`. 
- """ - return response, metadata - - def pre_update_instance( - self, - request: instance.Instance, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[instance.Instance, Sequence[Tuple[str, Union[str, bytes]]]]: - """Pre-rpc interceptor for update_instance - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. - """ - return request, metadata - - def post_update_instance(self, response: instance.Instance) -> instance.Instance: - """Post-rpc interceptor for update_instance - - DEPRECATED. Please use the `post_update_instance_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. This `post_update_instance` interceptor runs - before the `post_update_instance_with_metadata` interceptor. - """ - return response - - def post_update_instance_with_metadata( - self, - response: instance.Instance, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[instance.Instance, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for update_instance - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableInstanceAdmin server but before it is returned to user code. - - We recommend only using this `post_update_instance_with_metadata` - interceptor in new development instead of the `post_update_instance` interceptor. - When both interceptors are used, this `post_update_instance_with_metadata` interceptor runs after the - `post_update_instance` interceptor. The (possibly modified) response returned by - `post_update_instance` will be passed to - `post_update_instance_with_metadata`. - """ - return response, metadata - - def pre_update_logical_view( - self, - request: bigtable_instance_admin.UpdateLogicalViewRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_instance_admin.UpdateLogicalViewRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for update_logical_view - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. - """ - return request, metadata - - def post_update_logical_view( - self, response: operations_pb2.Operation - ) -> operations_pb2.Operation: - """Post-rpc interceptor for update_logical_view - - DEPRECATED. Please use the `post_update_logical_view_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. This `post_update_logical_view` interceptor runs - before the `post_update_logical_view_with_metadata` interceptor. - """ - return response - - def post_update_logical_view_with_metadata( - self, - response: operations_pb2.Operation, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for update_logical_view - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableInstanceAdmin server but before it is returned to user code. - - We recommend only using this `post_update_logical_view_with_metadata` - interceptor in new development instead of the `post_update_logical_view` interceptor. 
- When both interceptors are used, this `post_update_logical_view_with_metadata` interceptor runs after the - `post_update_logical_view` interceptor. The (possibly modified) response returned by - `post_update_logical_view` will be passed to - `post_update_logical_view_with_metadata`. - """ - return response, metadata - - def pre_update_materialized_view( - self, - request: bigtable_instance_admin.UpdateMaterializedViewRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_instance_admin.UpdateMaterializedViewRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for update_materialized_view - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. - """ - return request, metadata - - def post_update_materialized_view( - self, response: operations_pb2.Operation - ) -> operations_pb2.Operation: - """Post-rpc interceptor for update_materialized_view - - DEPRECATED. Please use the `post_update_materialized_view_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. This `post_update_materialized_view` interceptor runs - before the `post_update_materialized_view_with_metadata` interceptor. - """ - return response - - def post_update_materialized_view_with_metadata( - self, - response: operations_pb2.Operation, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for update_materialized_view - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableInstanceAdmin server but before it is returned to user code. - - We recommend only using this `post_update_materialized_view_with_metadata` - interceptor in new development instead of the `post_update_materialized_view` interceptor. - When both interceptors are used, this `post_update_materialized_view_with_metadata` interceptor runs after the - `post_update_materialized_view` interceptor. The (possibly modified) response returned by - `post_update_materialized_view` will be passed to - `post_update_materialized_view_with_metadata`. - """ - return response, metadata - - -@dataclasses.dataclass -class BigtableInstanceAdminRestStub: - _session: AuthorizedSession - _host: str - _interceptor: BigtableInstanceAdminRestInterceptor - - -class BigtableInstanceAdminRestTransport(_BaseBigtableInstanceAdminRestTransport): - """REST backend synchronous transport for BigtableInstanceAdmin. - - Service for creating, configuring, and deleting Cloud - Bigtable Instances and Clusters. Provides access to the Instance - and Cluster schemas only, not the tables' metadata or data - stored in those tables. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. 
- - It sends JSON representations of protocol buffers over HTTP/1.1 - """ - - def __init__( - self, - *, - host: str = "bigtableadmin.googleapis.com", - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - url_scheme: str = "https", - interceptor: Optional[BigtableInstanceAdminRestInterceptor] = None, - api_audience: Optional[str] = None, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to (default: 'bigtableadmin.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. - """ - # Run the base constructor - # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. - # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the - # credentials object - super().__init__( - host=host, - credentials=credentials, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - url_scheme=url_scheme, - api_audience=api_audience, - ) - self._session = AuthorizedSession( - self._credentials, default_host=self.DEFAULT_HOST - ) - self._operations_client: Optional[operations_v1.AbstractOperationsClient] = None - if client_cert_source_for_mtls: - self._session.configure_mtls_channel(client_cert_source_for_mtls) - self._interceptor = interceptor or BigtableInstanceAdminRestInterceptor() - self._prep_wrapped_messages(client_info) - - @property - def operations_client(self) -> operations_v1.AbstractOperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Only create a new client if we do not already have one. 
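# --- Illustrative sketch: wiring a custom interceptor into the REST
# transport whose constructor is documented above, then handing the
# transport to the admin client. Assumes application-default
# credentials; MyInterceptor stands in for any subclass such as the
# sketches earlier in this file, and "my-project" is a placeholder.
from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
    BigtableInstanceAdminClient,
)
from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.rest import (
    BigtableInstanceAdminRestInterceptor,
    BigtableInstanceAdminRestTransport,
)


class MyInterceptor(BigtableInstanceAdminRestInterceptor):
    """Override the pre_/post_ hooks here as needed."""


transport = BigtableInstanceAdminRestTransport(interceptor=MyInterceptor())
client = BigtableInstanceAdminClient(transport=transport)
for instance in client.list_instances(parent="projects/my-project").instances:
    print(instance.name)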
- if self._operations_client is None: - http_options: Dict[str, List[Dict[str, str]]] = { - "google.longrunning.Operations.CancelOperation": [ - { - "method": "post", - "uri": "/v2/{name=operations/**}:cancel", - }, - ], - "google.longrunning.Operations.DeleteOperation": [ - { - "method": "delete", - "uri": "/v2/{name=operations/**}", - }, - ], - "google.longrunning.Operations.GetOperation": [ - { - "method": "get", - "uri": "/v2/{name=operations/**}", - }, - ], - "google.longrunning.Operations.ListOperations": [ - { - "method": "get", - "uri": "/v2/{name=operations/projects/**}/operations", - }, - ], - } - - rest_transport = operations_v1.OperationsRestTransport( - host=self._host, - # use the credentials which are saved - credentials=self._credentials, - scopes=self._scopes, - http_options=http_options, - path_prefix="v2", - ) - - self._operations_client = operations_v1.AbstractOperationsClient( - transport=rest_transport - ) - - # Return the client from cache. - return self._operations_client - - class _CreateAppProfile( - _BaseBigtableInstanceAdminRestTransport._BaseCreateAppProfile, - BigtableInstanceAdminRestStub, - ): - def __hash__(self): - return hash("BigtableInstanceAdminRestTransport.CreateAppProfile") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - return response - - def __call__( - self, - request: bigtable_instance_admin.CreateAppProfileRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> instance.AppProfile: - r"""Call the create app profile method over HTTP. - - Args: - request (~.bigtable_instance_admin.CreateAppProfileRequest): - The request object. Request message for - BigtableInstanceAdmin.CreateAppProfile. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.instance.AppProfile: - A configuration object describing how - Cloud Bigtable should treat traffic from - a particular end user application. 
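# --- Illustrative sketch: the lazily created operations client above
# can also be used directly to poll a long-running operation by name.
# The operation name below is a placeholder shaped after the
# "/v2/{name=operations/**}" URI mapping shown in http_options.
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableInstanceAdminClient(transport="rest")
op = client.transport.operations_client.get_operation(
    name="operations/projects/my-project/instances/my-instance/locations/us-central1-b/operations/1234"
)
print(op.done)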
- - """ - - http_options = ( - _BaseBigtableInstanceAdminRestTransport._BaseCreateAppProfile._get_http_options() - ) - - request, metadata = self._interceptor.pre_create_app_profile( - request, metadata - ) - transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseCreateAppProfile._get_transcoded_request( - http_options, request - ) - - body = _BaseBigtableInstanceAdminRestTransport._BaseCreateAppProfile._get_request_body_json( - transcoded_request - ) - - # Jsonify the query params - query_params = _BaseBigtableInstanceAdminRestTransport._BaseCreateAppProfile._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = type(request).to_json(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.CreateAppProfile", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "CreateAppProfile", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = ( - BigtableInstanceAdminRestTransport._CreateAppProfile._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - body, - ) - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = instance.AppProfile() - pb_resp = instance.AppProfile.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_create_app_profile(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_create_app_profile_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = instance.AppProfile.to_json(response) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.create_app_profile", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "CreateAppProfile", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _CreateCluster( - _BaseBigtableInstanceAdminRestTransport._BaseCreateCluster, - BigtableInstanceAdminRestStub, - ): - def __hash__(self): - return hash("BigtableInstanceAdminRestTransport.CreateCluster") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - 
) - return response - - def __call__( - self, - request: bigtable_instance_admin.CreateClusterRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operations_pb2.Operation: - r"""Call the create cluster method over HTTP. - - Args: - request (~.bigtable_instance_admin.CreateClusterRequest): - The request object. Request message for - BigtableInstanceAdmin.CreateCluster. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. - - """ - - http_options = ( - _BaseBigtableInstanceAdminRestTransport._BaseCreateCluster._get_http_options() - ) - - request, metadata = self._interceptor.pre_create_cluster(request, metadata) - transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseCreateCluster._get_transcoded_request( - http_options, request - ) - - body = _BaseBigtableInstanceAdminRestTransport._BaseCreateCluster._get_request_body_json( - transcoded_request - ) - - # Jsonify the query params - query_params = _BaseBigtableInstanceAdminRestTransport._BaseCreateCluster._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = json_format.MessageToJson(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.CreateCluster", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "CreateCluster", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = BigtableInstanceAdminRestTransport._CreateCluster._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_create_cluster(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_create_cluster_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = json_format.MessageToJson(resp) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.create_cluster", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "CreateCluster", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _CreateInstance( - _BaseBigtableInstanceAdminRestTransport._BaseCreateInstance, - BigtableInstanceAdminRestStub, - ): - def __hash__(self): - return hash("BigtableInstanceAdminRestTransport.CreateInstance") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - return response - - def __call__( - self, - request: bigtable_instance_admin.CreateInstanceRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operations_pb2.Operation: - r"""Call the create instance method over HTTP. - - Args: - request (~.bigtable_instance_admin.CreateInstanceRequest): - The request object. Request message for - BigtableInstanceAdmin.CreateInstance. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. 
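# --- Illustrative sketch: at the client level the raw
# operations_pb2.Operation produced above is wrapped in a
# google.api_core.operation.Operation, so callers can block on the
# result instead of polling by hand. Resource names, IDs, and sizes
# are placeholders.
from google.cloud import bigtable_admin_v2
from google.cloud.bigtable_admin_v2 import types

client = bigtable_admin_v2.BigtableInstanceAdminClient(transport="rest")
operation = client.create_instance(
    parent="projects/my-project",
    instance_id="my-instance",
    instance=types.Instance(display_name="my-instance"),
    clusters={
        "my-cluster": types.Cluster(
            location="projects/my-project/locations/us-central1-b",
            serve_nodes=1,
        )
    },
)
instance = operation.result(timeout=600)  # waits for the long-running operation
print(instance.name)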
- - """ - - http_options = ( - _BaseBigtableInstanceAdminRestTransport._BaseCreateInstance._get_http_options() - ) - - request, metadata = self._interceptor.pre_create_instance(request, metadata) - transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseCreateInstance._get_transcoded_request( - http_options, request - ) - - body = _BaseBigtableInstanceAdminRestTransport._BaseCreateInstance._get_request_body_json( - transcoded_request - ) - - # Jsonify the query params - query_params = _BaseBigtableInstanceAdminRestTransport._BaseCreateInstance._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = json_format.MessageToJson(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.CreateInstance", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "CreateInstance", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = BigtableInstanceAdminRestTransport._CreateInstance._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_create_instance(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_create_instance_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = json_format.MessageToJson(resp) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.create_instance", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "CreateInstance", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _CreateLogicalView( - _BaseBigtableInstanceAdminRestTransport._BaseCreateLogicalView, - BigtableInstanceAdminRestStub, - ): - def __hash__(self): - return hash("BigtableInstanceAdminRestTransport.CreateLogicalView") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - return response - - def __call__( - self, - request: 
bigtable_instance_admin.CreateLogicalViewRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operations_pb2.Operation: - r"""Call the create logical view method over HTTP. - - Args: - request (~.bigtable_instance_admin.CreateLogicalViewRequest): - The request object. Request message for - BigtableInstanceAdmin.CreateLogicalView. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. - - """ - - http_options = ( - _BaseBigtableInstanceAdminRestTransport._BaseCreateLogicalView._get_http_options() - ) - - request, metadata = self._interceptor.pre_create_logical_view( - request, metadata - ) - transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseCreateLogicalView._get_transcoded_request( - http_options, request - ) - - body = _BaseBigtableInstanceAdminRestTransport._BaseCreateLogicalView._get_request_body_json( - transcoded_request - ) - - # Jsonify the query params - query_params = _BaseBigtableInstanceAdminRestTransport._BaseCreateLogicalView._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = json_format.MessageToJson(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.CreateLogicalView", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "CreateLogicalView", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = ( - BigtableInstanceAdminRestTransport._CreateLogicalView._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - body, - ) - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
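# --- Illustrative sketch: the DEBUG request/response logging guarded by
# CLIENT_LOGGING_SUPPORTED above goes through the standard library
# logger for this module, so ordinary logging configuration switches it
# on. The logger name assumes the module path of this file; adjust it
# if the package layout differs.
import logging

logging.basicConfig(level=logging.INFO)
logging.getLogger(
    "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.rest"
).setLevel(logging.DEBUG)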
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_create_logical_view(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_create_logical_view_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = json_format.MessageToJson(resp) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.create_logical_view", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "CreateLogicalView", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _CreateMaterializedView( - _BaseBigtableInstanceAdminRestTransport._BaseCreateMaterializedView, - BigtableInstanceAdminRestStub, - ): - def __hash__(self): - return hash("BigtableInstanceAdminRestTransport.CreateMaterializedView") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - return response - - def __call__( - self, - request: bigtable_instance_admin.CreateMaterializedViewRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operations_pb2.Operation: - r"""Call the create materialized view method over HTTP. - - Args: - request (~.bigtable_instance_admin.CreateMaterializedViewRequest): - The request object. Request message for - BigtableInstanceAdmin.CreateMaterializedView. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. 
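# --- Illustrative sketch of the metadata convention documented above:
# plain keys carry str values, while keys ending in "-bin" carry bytes.
# The key names, resource names, and the serialized bytes are examples
# only, and "client" assumes the admin client built in an earlier
# sketch (or construct one with transport="rest").
view = client.get_materialized_view(
    name="projects/my-project/instances/my-instance/materializedViews/my-view",
    metadata=[
        ("x-example-caller", "admin-script"),
        ("x-example-context-bin", b"\x0a\x02\x08\x01"),
    ],
)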
- - """ - - http_options = ( - _BaseBigtableInstanceAdminRestTransport._BaseCreateMaterializedView._get_http_options() - ) - - request, metadata = self._interceptor.pre_create_materialized_view( - request, metadata - ) - transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseCreateMaterializedView._get_transcoded_request( - http_options, request - ) - - body = _BaseBigtableInstanceAdminRestTransport._BaseCreateMaterializedView._get_request_body_json( - transcoded_request - ) - - # Jsonify the query params - query_params = _BaseBigtableInstanceAdminRestTransport._BaseCreateMaterializedView._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = json_format.MessageToJson(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.CreateMaterializedView", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "CreateMaterializedView", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = BigtableInstanceAdminRestTransport._CreateMaterializedView._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_create_materialized_view(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_create_materialized_view_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = json_format.MessageToJson(resp) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.create_materialized_view", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "CreateMaterializedView", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _DeleteAppProfile( - _BaseBigtableInstanceAdminRestTransport._BaseDeleteAppProfile, - BigtableInstanceAdminRestStub, - ): - def __hash__(self): - return hash("BigtableInstanceAdminRestTransport.DeleteAppProfile") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, 
strict=True), - ) - return response - - def __call__( - self, - request: bigtable_instance_admin.DeleteAppProfileRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ): - r"""Call the delete app profile method over HTTP. - - Args: - request (~.bigtable_instance_admin.DeleteAppProfileRequest): - The request object. Request message for - BigtableInstanceAdmin.DeleteAppProfile. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - """ - - http_options = ( - _BaseBigtableInstanceAdminRestTransport._BaseDeleteAppProfile._get_http_options() - ) - - request, metadata = self._interceptor.pre_delete_app_profile( - request, metadata - ) - transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseDeleteAppProfile._get_transcoded_request( - http_options, request - ) - - # Jsonify the query params - query_params = _BaseBigtableInstanceAdminRestTransport._BaseDeleteAppProfile._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = json_format.MessageToJson(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.DeleteAppProfile", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "DeleteAppProfile", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = ( - BigtableInstanceAdminRestTransport._DeleteAppProfile._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - ) - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
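# --- Illustrative sketch of what _get_response does under the hood: an
# AuthorizedSession issues the HTTP/1.1 call with the query parameters
# flattened by rest_helpers. The endpoint path and resource names are
# placeholders; this is not how callers are expected to use the client.
import google.auth
from google.api_core import rest_helpers
from google.auth.transport.requests import AuthorizedSession

credentials, _ = google.auth.default()
session = AuthorizedSession(credentials)
params = rest_helpers.flatten_query_params({"ignoreWarnings": True}, strict=True)
resp = session.delete(
    "https://bigtableadmin.googleapis.com/v2/projects/my-project/instances/my-instance/appProfiles/my-profile",
    params=params,
    headers={"Content-Type": "application/json"},
)
resp.raise_for_status()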
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - class _DeleteCluster( - _BaseBigtableInstanceAdminRestTransport._BaseDeleteCluster, - BigtableInstanceAdminRestStub, - ): - def __hash__(self): - return hash("BigtableInstanceAdminRestTransport.DeleteCluster") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - return response - - def __call__( - self, - request: bigtable_instance_admin.DeleteClusterRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ): - r"""Call the delete cluster method over HTTP. - - Args: - request (~.bigtable_instance_admin.DeleteClusterRequest): - The request object. Request message for - BigtableInstanceAdmin.DeleteCluster. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - """ - - http_options = ( - _BaseBigtableInstanceAdminRestTransport._BaseDeleteCluster._get_http_options() - ) - - request, metadata = self._interceptor.pre_delete_cluster(request, metadata) - transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseDeleteCluster._get_transcoded_request( - http_options, request - ) - - # Jsonify the query params - query_params = _BaseBigtableInstanceAdminRestTransport._BaseDeleteCluster._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = json_format.MessageToJson(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.DeleteCluster", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "DeleteCluster", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = BigtableInstanceAdminRestTransport._DeleteCluster._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - class _DeleteInstance( - _BaseBigtableInstanceAdminRestTransport._BaseDeleteInstance, - BigtableInstanceAdminRestStub, - ): - def __hash__(self): - return hash("BigtableInstanceAdminRestTransport.DeleteInstance") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - return response - - def __call__( - self, - request: bigtable_instance_admin.DeleteInstanceRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ): - r"""Call the delete instance method over HTTP. - - Args: - request (~.bigtable_instance_admin.DeleteInstanceRequest): - The request object. Request message for - BigtableInstanceAdmin.DeleteInstance. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - """ - - http_options = ( - _BaseBigtableInstanceAdminRestTransport._BaseDeleteInstance._get_http_options() - ) - - request, metadata = self._interceptor.pre_delete_instance(request, metadata) - transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseDeleteInstance._get_transcoded_request( - http_options, request - ) - - # Jsonify the query params - query_params = _BaseBigtableInstanceAdminRestTransport._BaseDeleteInstance._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = json_format.MessageToJson(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.DeleteInstance", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "DeleteInstance", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = BigtableInstanceAdminRestTransport._DeleteInstance._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - class _DeleteLogicalView( - _BaseBigtableInstanceAdminRestTransport._BaseDeleteLogicalView, - BigtableInstanceAdminRestStub, - ): - def __hash__(self): - return hash("BigtableInstanceAdminRestTransport.DeleteLogicalView") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - return response - - def __call__( - self, - request: bigtable_instance_admin.DeleteLogicalViewRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ): - r"""Call the delete logical view method over HTTP. - - Args: - request (~.bigtable_instance_admin.DeleteLogicalViewRequest): - The request object. Request message for - BigtableInstanceAdmin.DeleteLogicalView. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - """ - - http_options = ( - _BaseBigtableInstanceAdminRestTransport._BaseDeleteLogicalView._get_http_options() - ) - - request, metadata = self._interceptor.pre_delete_logical_view( - request, metadata - ) - transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseDeleteLogicalView._get_transcoded_request( - http_options, request - ) - - # Jsonify the query params - query_params = _BaseBigtableInstanceAdminRestTransport._BaseDeleteLogicalView._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = json_format.MessageToJson(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.DeleteLogicalView", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "DeleteLogicalView", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = ( - BigtableInstanceAdminRestTransport._DeleteLogicalView._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - ) - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - class _DeleteMaterializedView( - _BaseBigtableInstanceAdminRestTransport._BaseDeleteMaterializedView, - BigtableInstanceAdminRestStub, - ): - def __hash__(self): - return hash("BigtableInstanceAdminRestTransport.DeleteMaterializedView") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - return response - - def __call__( - self, - request: bigtable_instance_admin.DeleteMaterializedViewRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ): - r"""Call the delete materialized view method over HTTP. - - Args: - request (~.bigtable_instance_admin.DeleteMaterializedViewRequest): - The request object. Request message for - BigtableInstanceAdmin.DeleteMaterializedView. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - """ - - http_options = ( - _BaseBigtableInstanceAdminRestTransport._BaseDeleteMaterializedView._get_http_options() - ) - - request, metadata = self._interceptor.pre_delete_materialized_view( - request, metadata - ) - transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseDeleteMaterializedView._get_transcoded_request( - http_options, request - ) - - # Jsonify the query params - query_params = _BaseBigtableInstanceAdminRestTransport._BaseDeleteMaterializedView._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = json_format.MessageToJson(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.DeleteMaterializedView", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "DeleteMaterializedView", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = BigtableInstanceAdminRestTransport._DeleteMaterializedView._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - class _GetAppProfile( - _BaseBigtableInstanceAdminRestTransport._BaseGetAppProfile, - BigtableInstanceAdminRestStub, - ): - def __hash__(self): - return hash("BigtableInstanceAdminRestTransport.GetAppProfile") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - return response - - def __call__( - self, - request: bigtable_instance_admin.GetAppProfileRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> instance.AppProfile: - r"""Call the get app profile method over HTTP. - - Args: - request (~.bigtable_instance_admin.GetAppProfileRequest): - The request object. Request message for - BigtableInstanceAdmin.GetAppProfile. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.instance.AppProfile: - A configuration object describing how - Cloud Bigtable should treat traffic from - a particular end user application. - - """ - - http_options = ( - _BaseBigtableInstanceAdminRestTransport._BaseGetAppProfile._get_http_options() - ) - - request, metadata = self._interceptor.pre_get_app_profile(request, metadata) - transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseGetAppProfile._get_transcoded_request( - http_options, request - ) - - # Jsonify the query params - query_params = _BaseBigtableInstanceAdminRestTransport._BaseGetAppProfile._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = type(request).to_json(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.GetAppProfile", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "GetAppProfile", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = BigtableInstanceAdminRestTransport._GetAppProfile._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = instance.AppProfile() - pb_resp = instance.AppProfile.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_get_app_profile(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_get_app_profile_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = instance.AppProfile.to_json(response) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.get_app_profile", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "GetAppProfile", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _GetCluster( - _BaseBigtableInstanceAdminRestTransport._BaseGetCluster, - BigtableInstanceAdminRestStub, - ): - def __hash__(self): - return hash("BigtableInstanceAdminRestTransport.GetCluster") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - return response - - def __call__( - self, - request: bigtable_instance_admin.GetClusterRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> instance.Cluster: - r"""Call the get cluster method over HTTP. - - Args: - request (~.bigtable_instance_admin.GetClusterRequest): - The request object. Request message for - BigtableInstanceAdmin.GetCluster. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.instance.Cluster: - A resizable group of nodes in a particular cloud - location, capable of serving all - [Tables][google.bigtable.admin.v2.Table] in the parent - [Instance][google.bigtable.admin.v2.Instance]. 
- - """ - - http_options = ( - _BaseBigtableInstanceAdminRestTransport._BaseGetCluster._get_http_options() - ) - - request, metadata = self._interceptor.pre_get_cluster(request, metadata) - transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseGetCluster._get_transcoded_request( - http_options, request - ) - - # Jsonify the query params - query_params = _BaseBigtableInstanceAdminRestTransport._BaseGetCluster._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = type(request).to_json(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.GetCluster", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "GetCluster", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = BigtableInstanceAdminRestTransport._GetCluster._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = instance.Cluster() - pb_resp = instance.Cluster.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_get_cluster(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_get_cluster_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = instance.Cluster.to_json(response) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.get_cluster", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "GetCluster", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _GetIamPolicy( - _BaseBigtableInstanceAdminRestTransport._BaseGetIamPolicy, - BigtableInstanceAdminRestStub, - ): - def __hash__(self): - return hash("BigtableInstanceAdminRestTransport.GetIamPolicy") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - return response - - def __call__( - self, - request: iam_policy_pb2.GetIamPolicyRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, 
bytes]]] = (), - ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. - - Args: - request (~.iam_policy_pb2.GetIamPolicyRequest): - The request object. Request message for ``GetIamPolicy`` method. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.policy_pb2.Policy: - An Identity and Access Management (IAM) policy, which - specifies access controls for Google Cloud resources. - - A ``Policy`` is a collection of ``bindings``. A - ``binding`` binds one or more ``members``, or - principals, to a single ``role``. Principals can be user - accounts, service accounts, Google groups, and domains - (such as G Suite). A ``role`` is a named list of - permissions; each ``role`` can be an IAM predefined role - or a user-created custom role. - - For some types of Google Cloud resources, a ``binding`` - can also specify a ``condition``, which is a logical - expression that allows access to a resource only if the - expression evaluates to ``true``. A condition can add - constraints based on attributes of the request, the - resource, or both. To learn which resources support - conditions in their IAM policies, see the `IAM - documentation `__. - - **JSON example:** - - :: - - { - "bindings": [ - { - "role": "roles/resourcemanager.organizationAdmin", - "members": [ - "user:mike@example.com", - "group:admins@example.com", - "domain:google.com", - "serviceAccount:my-project-id@appspot.gserviceaccount.com" - ] - }, - { - "role": "roles/resourcemanager.organizationViewer", - "members": [ - "user:eve@example.com" - ], - "condition": { - "title": "expirable access", - "description": "Does not grant access after Sep 2020", - "expression": "request.time < - timestamp('2020-10-01T00:00:00.000Z')", - } - } - ], - "etag": "BwWWja0YfJA=", - "version": 3 - } - - **YAML example:** - - :: - - bindings: - - members: - - user:mike@example.com - - group:admins@example.com - - domain:google.com - - serviceAccount:my-project-id@appspot.gserviceaccount.com - role: roles/resourcemanager.organizationAdmin - - members: - - user:eve@example.com - role: roles/resourcemanager.organizationViewer - condition: - title: expirable access - description: Does not grant access after Sep 2020 - expression: request.time < timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: 3 - - For a description of IAM and its features, see the `IAM - documentation `__. 
- - """ - - http_options = ( - _BaseBigtableInstanceAdminRestTransport._BaseGetIamPolicy._get_http_options() - ) - - request, metadata = self._interceptor.pre_get_iam_policy(request, metadata) - transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseGetIamPolicy._get_transcoded_request( - http_options, request - ) - - body = _BaseBigtableInstanceAdminRestTransport._BaseGetIamPolicy._get_request_body_json( - transcoded_request - ) - - # Jsonify the query params - query_params = _BaseBigtableInstanceAdminRestTransport._BaseGetIamPolicy._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = json_format.MessageToJson(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.GetIamPolicy", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "GetIamPolicy", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = BigtableInstanceAdminRestTransport._GetIamPolicy._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = policy_pb2.Policy() - pb_resp = resp - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_get_iam_policy(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_get_iam_policy_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = json_format.MessageToJson(resp) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.get_iam_policy", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "GetIamPolicy", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _GetInstance( - _BaseBigtableInstanceAdminRestTransport._BaseGetInstance, - BigtableInstanceAdminRestStub, - ): - def __hash__(self): - return hash("BigtableInstanceAdminRestTransport.GetInstance") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - return response - - def __call__( - self, - request: bigtable_instance_admin.GetInstanceRequest, - *, - 
retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> instance.Instance: - r"""Call the get instance method over HTTP. - - Args: - request (~.bigtable_instance_admin.GetInstanceRequest): - The request object. Request message for - BigtableInstanceAdmin.GetInstance. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.instance.Instance: - A collection of Bigtable - [Tables][google.bigtable.admin.v2.Table] and the - resources that serve them. All tables in an instance are - served from all - [Clusters][google.bigtable.admin.v2.Cluster] in the - instance. - - """ - - http_options = ( - _BaseBigtableInstanceAdminRestTransport._BaseGetInstance._get_http_options() - ) - - request, metadata = self._interceptor.pre_get_instance(request, metadata) - transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseGetInstance._get_transcoded_request( - http_options, request - ) - - # Jsonify the query params - query_params = _BaseBigtableInstanceAdminRestTransport._BaseGetInstance._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = type(request).to_json(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.GetInstance", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "GetInstance", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = BigtableInstanceAdminRestTransport._GetInstance._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = instance.Instance() - pb_resp = instance.Instance.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_get_instance(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_get_instance_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = instance.Instance.to_json(response) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.get_instance", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "GetInstance", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _GetLogicalView( - _BaseBigtableInstanceAdminRestTransport._BaseGetLogicalView, - BigtableInstanceAdminRestStub, - ): - def __hash__(self): - return hash("BigtableInstanceAdminRestTransport.GetLogicalView") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - return response - - def __call__( - self, - request: bigtable_instance_admin.GetLogicalViewRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> instance.LogicalView: - r"""Call the get logical view method over HTTP. - - Args: - request (~.bigtable_instance_admin.GetLogicalViewRequest): - The request object. Request message for - BigtableInstanceAdmin.GetLogicalView. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.instance.LogicalView: - A SQL logical view object that can be - referenced in SQL queries. 
- - """ - - http_options = ( - _BaseBigtableInstanceAdminRestTransport._BaseGetLogicalView._get_http_options() - ) - - request, metadata = self._interceptor.pre_get_logical_view( - request, metadata - ) - transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseGetLogicalView._get_transcoded_request( - http_options, request - ) - - # Jsonify the query params - query_params = _BaseBigtableInstanceAdminRestTransport._BaseGetLogicalView._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = type(request).to_json(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.GetLogicalView", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "GetLogicalView", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = BigtableInstanceAdminRestTransport._GetLogicalView._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = instance.LogicalView() - pb_resp = instance.LogicalView.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_get_logical_view(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_get_logical_view_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = instance.LogicalView.to_json(response) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.get_logical_view", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "GetLogicalView", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _GetMaterializedView( - _BaseBigtableInstanceAdminRestTransport._BaseGetMaterializedView, - BigtableInstanceAdminRestStub, - ): - def __hash__(self): - return hash("BigtableInstanceAdminRestTransport.GetMaterializedView") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - return response - - def __call__( - self, - request: bigtable_instance_admin.GetMaterializedViewRequest, - *, - retry: OptionalRetry = 
gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> instance.MaterializedView: - r"""Call the get materialized view method over HTTP. - - Args: - request (~.bigtable_instance_admin.GetMaterializedViewRequest): - The request object. Request message for - BigtableInstanceAdmin.GetMaterializedView. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.instance.MaterializedView: - A materialized view object that can - be referenced in SQL queries. - - """ - - http_options = ( - _BaseBigtableInstanceAdminRestTransport._BaseGetMaterializedView._get_http_options() - ) - - request, metadata = self._interceptor.pre_get_materialized_view( - request, metadata - ) - transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseGetMaterializedView._get_transcoded_request( - http_options, request - ) - - # Jsonify the query params - query_params = _BaseBigtableInstanceAdminRestTransport._BaseGetMaterializedView._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = type(request).to_json(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.GetMaterializedView", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "GetMaterializedView", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = ( - BigtableInstanceAdminRestTransport._GetMaterializedView._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - ) - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = instance.MaterializedView() - pb_resp = instance.MaterializedView.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_get_materialized_view(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_get_materialized_view_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = instance.MaterializedView.to_json(response) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.get_materialized_view", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "GetMaterializedView", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _ListAppProfiles( - _BaseBigtableInstanceAdminRestTransport._BaseListAppProfiles, - BigtableInstanceAdminRestStub, - ): - def __hash__(self): - return hash("BigtableInstanceAdminRestTransport.ListAppProfiles") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - return response - - def __call__( - self, - request: bigtable_instance_admin.ListAppProfilesRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> bigtable_instance_admin.ListAppProfilesResponse: - r"""Call the list app profiles method over HTTP. - - Args: - request (~.bigtable_instance_admin.ListAppProfilesRequest): - The request object. Request message for - BigtableInstanceAdmin.ListAppProfiles. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.bigtable_instance_admin.ListAppProfilesResponse: - Response message for - BigtableInstanceAdmin.ListAppProfiles. 
- - """ - - http_options = ( - _BaseBigtableInstanceAdminRestTransport._BaseListAppProfiles._get_http_options() - ) - - request, metadata = self._interceptor.pre_list_app_profiles( - request, metadata - ) - transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseListAppProfiles._get_transcoded_request( - http_options, request - ) - - # Jsonify the query params - query_params = _BaseBigtableInstanceAdminRestTransport._BaseListAppProfiles._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = type(request).to_json(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.ListAppProfiles", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "ListAppProfiles", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = ( - BigtableInstanceAdminRestTransport._ListAppProfiles._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - ) - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = bigtable_instance_admin.ListAppProfilesResponse() - pb_resp = bigtable_instance_admin.ListAppProfilesResponse.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_list_app_profiles(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_list_app_profiles_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = ( - bigtable_instance_admin.ListAppProfilesResponse.to_json( - response - ) - ) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.list_app_profiles", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "ListAppProfiles", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _ListClusters( - _BaseBigtableInstanceAdminRestTransport._BaseListClusters, - BigtableInstanceAdminRestStub, - ): - def __hash__(self): - return hash("BigtableInstanceAdminRestTransport.ListClusters") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - return response - - def __call__( - self, - request: 
bigtable_instance_admin.ListClustersRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> bigtable_instance_admin.ListClustersResponse: - r"""Call the list clusters method over HTTP. - - Args: - request (~.bigtable_instance_admin.ListClustersRequest): - The request object. Request message for - BigtableInstanceAdmin.ListClusters. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.bigtable_instance_admin.ListClustersResponse: - Response message for - BigtableInstanceAdmin.ListClusters. - - """ - - http_options = ( - _BaseBigtableInstanceAdminRestTransport._BaseListClusters._get_http_options() - ) - - request, metadata = self._interceptor.pre_list_clusters(request, metadata) - transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseListClusters._get_transcoded_request( - http_options, request - ) - - # Jsonify the query params - query_params = _BaseBigtableInstanceAdminRestTransport._BaseListClusters._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = type(request).to_json(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.ListClusters", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "ListClusters", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = BigtableInstanceAdminRestTransport._ListClusters._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = bigtable_instance_admin.ListClustersResponse() - pb_resp = bigtable_instance_admin.ListClustersResponse.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_list_clusters(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_list_clusters_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = ( - bigtable_instance_admin.ListClustersResponse.to_json(response) - ) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.list_clusters", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "ListClusters", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _ListHotTablets( - _BaseBigtableInstanceAdminRestTransport._BaseListHotTablets, - BigtableInstanceAdminRestStub, - ): - def __hash__(self): - return hash("BigtableInstanceAdminRestTransport.ListHotTablets") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - return response - - def __call__( - self, - request: bigtable_instance_admin.ListHotTabletsRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> bigtable_instance_admin.ListHotTabletsResponse: - r"""Call the list hot tablets method over HTTP. - - Args: - request (~.bigtable_instance_admin.ListHotTabletsRequest): - The request object. Request message for - BigtableInstanceAdmin.ListHotTablets. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.bigtable_instance_admin.ListHotTabletsResponse: - Response message for - BigtableInstanceAdmin.ListHotTablets. 
- - """ - - http_options = ( - _BaseBigtableInstanceAdminRestTransport._BaseListHotTablets._get_http_options() - ) - - request, metadata = self._interceptor.pre_list_hot_tablets( - request, metadata - ) - transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseListHotTablets._get_transcoded_request( - http_options, request - ) - - # Jsonify the query params - query_params = _BaseBigtableInstanceAdminRestTransport._BaseListHotTablets._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = type(request).to_json(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.ListHotTablets", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "ListHotTablets", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = BigtableInstanceAdminRestTransport._ListHotTablets._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = bigtable_instance_admin.ListHotTabletsResponse() - pb_resp = bigtable_instance_admin.ListHotTabletsResponse.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_list_hot_tablets(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_list_hot_tablets_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = ( - bigtable_instance_admin.ListHotTabletsResponse.to_json(response) - ) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.list_hot_tablets", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "ListHotTablets", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _ListInstances( - _BaseBigtableInstanceAdminRestTransport._BaseListInstances, - BigtableInstanceAdminRestStub, - ): - def __hash__(self): - return hash("BigtableInstanceAdminRestTransport.ListInstances") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - return response - - def __call__( - self, - request: 
bigtable_instance_admin.ListInstancesRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> bigtable_instance_admin.ListInstancesResponse: - r"""Call the list instances method over HTTP. - - Args: - request (~.bigtable_instance_admin.ListInstancesRequest): - The request object. Request message for - BigtableInstanceAdmin.ListInstances. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.bigtable_instance_admin.ListInstancesResponse: - Response message for - BigtableInstanceAdmin.ListInstances. - - """ - - http_options = ( - _BaseBigtableInstanceAdminRestTransport._BaseListInstances._get_http_options() - ) - - request, metadata = self._interceptor.pre_list_instances(request, metadata) - transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseListInstances._get_transcoded_request( - http_options, request - ) - - # Jsonify the query params - query_params = _BaseBigtableInstanceAdminRestTransport._BaseListInstances._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = type(request).to_json(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.ListInstances", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "ListInstances", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = BigtableInstanceAdminRestTransport._ListInstances._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = bigtable_instance_admin.ListInstancesResponse() - pb_resp = bigtable_instance_admin.ListInstancesResponse.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_list_instances(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_list_instances_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = ( - bigtable_instance_admin.ListInstancesResponse.to_json(response) - ) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.list_instances", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "ListInstances", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _ListLogicalViews( - _BaseBigtableInstanceAdminRestTransport._BaseListLogicalViews, - BigtableInstanceAdminRestStub, - ): - def __hash__(self): - return hash("BigtableInstanceAdminRestTransport.ListLogicalViews") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - return response - - def __call__( - self, - request: bigtable_instance_admin.ListLogicalViewsRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> bigtable_instance_admin.ListLogicalViewsResponse: - r"""Call the list logical views method over HTTP. - - Args: - request (~.bigtable_instance_admin.ListLogicalViewsRequest): - The request object. Request message for - BigtableInstanceAdmin.ListLogicalViews. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.bigtable_instance_admin.ListLogicalViewsResponse: - Response message for - BigtableInstanceAdmin.ListLogicalViews. 
- - """ - - http_options = ( - _BaseBigtableInstanceAdminRestTransport._BaseListLogicalViews._get_http_options() - ) - - request, metadata = self._interceptor.pre_list_logical_views( - request, metadata - ) - transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseListLogicalViews._get_transcoded_request( - http_options, request - ) - - # Jsonify the query params - query_params = _BaseBigtableInstanceAdminRestTransport._BaseListLogicalViews._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = type(request).to_json(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.ListLogicalViews", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "ListLogicalViews", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = ( - BigtableInstanceAdminRestTransport._ListLogicalViews._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - ) - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = bigtable_instance_admin.ListLogicalViewsResponse() - pb_resp = bigtable_instance_admin.ListLogicalViewsResponse.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_list_logical_views(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_list_logical_views_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = ( - bigtable_instance_admin.ListLogicalViewsResponse.to_json( - response - ) - ) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.list_logical_views", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "ListLogicalViews", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _ListMaterializedViews( - _BaseBigtableInstanceAdminRestTransport._BaseListMaterializedViews, - BigtableInstanceAdminRestStub, - ): - def __hash__(self): - return hash("BigtableInstanceAdminRestTransport.ListMaterializedViews") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - return 
response - - def __call__( - self, - request: bigtable_instance_admin.ListMaterializedViewsRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> bigtable_instance_admin.ListMaterializedViewsResponse: - r"""Call the list materialized views method over HTTP. - - Args: - request (~.bigtable_instance_admin.ListMaterializedViewsRequest): - The request object. Request message for - BigtableInstanceAdmin.ListMaterializedViews. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.bigtable_instance_admin.ListMaterializedViewsResponse: - Response message for - BigtableInstanceAdmin.ListMaterializedViews. - - """ - - http_options = ( - _BaseBigtableInstanceAdminRestTransport._BaseListMaterializedViews._get_http_options() - ) - - request, metadata = self._interceptor.pre_list_materialized_views( - request, metadata - ) - transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseListMaterializedViews._get_transcoded_request( - http_options, request - ) - - # Jsonify the query params - query_params = _BaseBigtableInstanceAdminRestTransport._BaseListMaterializedViews._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = type(request).to_json(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.ListMaterializedViews", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "ListMaterializedViews", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = ( - BigtableInstanceAdminRestTransport._ListMaterializedViews._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - ) - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
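Note: every stub in this transport shares the same _get_response shape: the transcoded request supplies the HTTP verb and URI, and getattr(session, method) dispatches it. A self-contained sketch of that dispatch pattern, using a plain requests.Session and placeholder host/URI values rather than the generated base classes:

from typing import Optional
import requests

def send_transcoded(
    session: requests.Session,
    host: str,
    transcoded: dict,
    query_params: dict,
    body: Optional[str] = None,
    timeout: float = 60.0,
) -> requests.Response:
    # transcoded["method"] is a lower-case verb such as "get" or "patch",
    # so the matching Session method can be looked up dynamically.
    http_call = getattr(session, transcoded["method"])
    return http_call(
        "{host}{uri}".format(host=host, uri=transcoded["uri"]),
        timeout=timeout,
        headers={"Content-Type": "application/json"},
        params=query_params,
        data=body,
    )

# Illustrative call (placeholder project):
# send_transcoded(requests.Session(), "https://bigtableadmin.googleapis.com",
#                 {"method": "get", "uri": "/v2/projects/my-project/instances"}, {})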
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = bigtable_instance_admin.ListMaterializedViewsResponse() - pb_resp = bigtable_instance_admin.ListMaterializedViewsResponse.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_list_materialized_views(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_list_materialized_views_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = ( - bigtable_instance_admin.ListMaterializedViewsResponse.to_json( - response - ) - ) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.list_materialized_views", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "ListMaterializedViews", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _PartialUpdateCluster( - _BaseBigtableInstanceAdminRestTransport._BasePartialUpdateCluster, - BigtableInstanceAdminRestStub, - ): - def __hash__(self): - return hash("BigtableInstanceAdminRestTransport.PartialUpdateCluster") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - return response - - def __call__( - self, - request: bigtable_instance_admin.PartialUpdateClusterRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operations_pb2.Operation: - r"""Call the partial update cluster method over HTTP. - - Args: - request (~.bigtable_instance_admin.PartialUpdateClusterRequest): - The request object. Request message for - BigtableInstanceAdmin.PartialUpdateCluster. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. 
- - """ - - http_options = ( - _BaseBigtableInstanceAdminRestTransport._BasePartialUpdateCluster._get_http_options() - ) - - request, metadata = self._interceptor.pre_partial_update_cluster( - request, metadata - ) - transcoded_request = _BaseBigtableInstanceAdminRestTransport._BasePartialUpdateCluster._get_transcoded_request( - http_options, request - ) - - body = _BaseBigtableInstanceAdminRestTransport._BasePartialUpdateCluster._get_request_body_json( - transcoded_request - ) - - # Jsonify the query params - query_params = _BaseBigtableInstanceAdminRestTransport._BasePartialUpdateCluster._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = json_format.MessageToJson(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.PartialUpdateCluster", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "PartialUpdateCluster", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = ( - BigtableInstanceAdminRestTransport._PartialUpdateCluster._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - body, - ) - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_partial_update_cluster(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_partial_update_cluster_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = json_format.MessageToJson(resp) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.partial_update_cluster", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "PartialUpdateCluster", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _PartialUpdateInstance( - _BaseBigtableInstanceAdminRestTransport._BasePartialUpdateInstance, - BigtableInstanceAdminRestStub, - ): - def __hash__(self): - return hash("BigtableInstanceAdminRestTransport.PartialUpdateInstance") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, 
strict=True), - data=body, - ) - return response - - def __call__( - self, - request: bigtable_instance_admin.PartialUpdateInstanceRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operations_pb2.Operation: - r"""Call the partial update instance method over HTTP. - - Args: - request (~.bigtable_instance_admin.PartialUpdateInstanceRequest): - The request object. Request message for - BigtableInstanceAdmin.PartialUpdateInstance. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. - - """ - - http_options = ( - _BaseBigtableInstanceAdminRestTransport._BasePartialUpdateInstance._get_http_options() - ) - - request, metadata = self._interceptor.pre_partial_update_instance( - request, metadata - ) - transcoded_request = _BaseBigtableInstanceAdminRestTransport._BasePartialUpdateInstance._get_transcoded_request( - http_options, request - ) - - body = _BaseBigtableInstanceAdminRestTransport._BasePartialUpdateInstance._get_request_body_json( - transcoded_request - ) - - # Jsonify the query params - query_params = _BaseBigtableInstanceAdminRestTransport._BasePartialUpdateInstance._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = json_format.MessageToJson(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.PartialUpdateInstance", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "PartialUpdateInstance", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = ( - BigtableInstanceAdminRestTransport._PartialUpdateInstance._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - body, - ) - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
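Note: the _PartialUpdateInstance stub returns a raw operations_pb2.Operation; at the public-client level this surfaces as a long-running-operation future. A sketch of that call, assuming the generated flattened parameters instance and update_mask; resource names are placeholders:

from google.cloud import bigtable_admin_v2
from google.protobuf import field_mask_pb2

client = bigtable_admin_v2.BigtableInstanceAdminClient(transport="rest")
operation = client.partial_update_instance(
    instance=bigtable_admin_v2.Instance(
        name="projects/my-project/instances/my-instance",  # placeholder
        display_name="Renamed instance",
    ),
    update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
)
updated = operation.result()  # blocks until the long-running operation completes
print(updated.display_name)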
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_partial_update_instance(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_partial_update_instance_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = json_format.MessageToJson(resp) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.partial_update_instance", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "PartialUpdateInstance", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _SetIamPolicy( - _BaseBigtableInstanceAdminRestTransport._BaseSetIamPolicy, - BigtableInstanceAdminRestStub, - ): - def __hash__(self): - return hash("BigtableInstanceAdminRestTransport.SetIamPolicy") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - return response - - def __call__( - self, - request: iam_policy_pb2.SetIamPolicyRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. - - Args: - request (~.iam_policy_pb2.SetIamPolicyRequest): - The request object. Request message for ``SetIamPolicy`` method. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.policy_pb2.Policy: - An Identity and Access Management (IAM) policy, which - specifies access controls for Google Cloud resources. - - A ``Policy`` is a collection of ``bindings``. A - ``binding`` binds one or more ``members``, or - principals, to a single ``role``. Principals can be user - accounts, service accounts, Google groups, and domains - (such as G Suite). A ``role`` is a named list of - permissions; each ``role`` can be an IAM predefined role - or a user-created custom role. - - For some types of Google Cloud resources, a ``binding`` - can also specify a ``condition``, which is a logical - expression that allows access to a resource only if the - expression evaluates to ``true``. A condition can add - constraints based on attributes of the request, the - resource, or both. 
To learn which resources support - conditions in their IAM policies, see the `IAM - documentation `__. - - **JSON example:** - - :: - - { - "bindings": [ - { - "role": "roles/resourcemanager.organizationAdmin", - "members": [ - "user:mike@example.com", - "group:admins@example.com", - "domain:google.com", - "serviceAccount:my-project-id@appspot.gserviceaccount.com" - ] - }, - { - "role": "roles/resourcemanager.organizationViewer", - "members": [ - "user:eve@example.com" - ], - "condition": { - "title": "expirable access", - "description": "Does not grant access after Sep 2020", - "expression": "request.time < - timestamp('2020-10-01T00:00:00.000Z')", - } - } - ], - "etag": "BwWWja0YfJA=", - "version": 3 - } - - **YAML example:** - - :: - - bindings: - - members: - - user:mike@example.com - - group:admins@example.com - - domain:google.com - - serviceAccount:my-project-id@appspot.gserviceaccount.com - role: roles/resourcemanager.organizationAdmin - - members: - - user:eve@example.com - role: roles/resourcemanager.organizationViewer - condition: - title: expirable access - description: Does not grant access after Sep 2020 - expression: request.time < timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: 3 - - For a description of IAM and its features, see the `IAM - documentation `__. - - """ - - http_options = ( - _BaseBigtableInstanceAdminRestTransport._BaseSetIamPolicy._get_http_options() - ) - - request, metadata = self._interceptor.pre_set_iam_policy(request, metadata) - transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseSetIamPolicy._get_transcoded_request( - http_options, request - ) - - body = _BaseBigtableInstanceAdminRestTransport._BaseSetIamPolicy._get_request_body_json( - transcoded_request - ) - - # Jsonify the query params - query_params = _BaseBigtableInstanceAdminRestTransport._BaseSetIamPolicy._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = json_format.MessageToJson(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.SetIamPolicy", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "SetIamPolicy", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = BigtableInstanceAdminRestTransport._SetIamPolicy._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
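Note: the policy structure described in the docstring above maps onto the google.iam.v1 protos used by this stub. A sketch of a SetIamPolicy call through the public client; the role, member, and resource values are illustrative only:

from google.cloud import bigtable_admin_v2
from google.iam.v1 import iam_policy_pb2, policy_pb2

client = bigtable_admin_v2.BigtableInstanceAdminClient(transport="rest")
policy = policy_pb2.Policy(
    bindings=[
        policy_pb2.Binding(
            role="roles/bigtable.user",        # illustrative role
            members=["user:eve@example.com"],  # illustrative principal
        )
    ]
)
new_policy = client.set_iam_policy(
    request=iam_policy_pb2.SetIamPolicyRequest(
        resource="projects/my-project/instances/my-instance",  # placeholder
        policy=policy,
    )
)
print(new_policy.etag)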
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = policy_pb2.Policy() - pb_resp = resp - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_set_iam_policy(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_set_iam_policy_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = json_format.MessageToJson(resp) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.set_iam_policy", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "SetIamPolicy", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _TestIamPermissions( - _BaseBigtableInstanceAdminRestTransport._BaseTestIamPermissions, - BigtableInstanceAdminRestStub, - ): - def __hash__(self): - return hash("BigtableInstanceAdminRestTransport.TestIamPermissions") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - return response - - def __call__( - self, - request: iam_policy_pb2.TestIamPermissionsRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. - - Args: - request (~.iam_policy_pb2.TestIamPermissionsRequest): - The request object. Request message for ``TestIamPermissions`` method. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.iam_policy_pb2.TestIamPermissionsResponse: - Response message for ``TestIamPermissions`` method. 
- """ - - http_options = ( - _BaseBigtableInstanceAdminRestTransport._BaseTestIamPermissions._get_http_options() - ) - - request, metadata = self._interceptor.pre_test_iam_permissions( - request, metadata - ) - transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseTestIamPermissions._get_transcoded_request( - http_options, request - ) - - body = _BaseBigtableInstanceAdminRestTransport._BaseTestIamPermissions._get_request_body_json( - transcoded_request - ) - - # Jsonify the query params - query_params = _BaseBigtableInstanceAdminRestTransport._BaseTestIamPermissions._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = json_format.MessageToJson(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.TestIamPermissions", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "TestIamPermissions", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = ( - BigtableInstanceAdminRestTransport._TestIamPermissions._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - body, - ) - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = iam_policy_pb2.TestIamPermissionsResponse() - pb_resp = resp - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_test_iam_permissions(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_test_iam_permissions_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = json_format.MessageToJson(resp) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.test_iam_permissions", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "TestIamPermissions", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _UpdateAppProfile( - _BaseBigtableInstanceAdminRestTransport._BaseUpdateAppProfile, - BigtableInstanceAdminRestStub, - ): - def __hash__(self): - return hash("BigtableInstanceAdminRestTransport.UpdateAppProfile") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, 
strict=True), - data=body, - ) - return response - - def __call__( - self, - request: bigtable_instance_admin.UpdateAppProfileRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operations_pb2.Operation: - r"""Call the update app profile method over HTTP. - - Args: - request (~.bigtable_instance_admin.UpdateAppProfileRequest): - The request object. Request message for - BigtableInstanceAdmin.UpdateAppProfile. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. - - """ - - http_options = ( - _BaseBigtableInstanceAdminRestTransport._BaseUpdateAppProfile._get_http_options() - ) - - request, metadata = self._interceptor.pre_update_app_profile( - request, metadata - ) - transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseUpdateAppProfile._get_transcoded_request( - http_options, request - ) - - body = _BaseBigtableInstanceAdminRestTransport._BaseUpdateAppProfile._get_request_body_json( - transcoded_request - ) - - # Jsonify the query params - query_params = _BaseBigtableInstanceAdminRestTransport._BaseUpdateAppProfile._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = json_format.MessageToJson(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.UpdateAppProfile", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "UpdateAppProfile", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = ( - BigtableInstanceAdminRestTransport._UpdateAppProfile._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - body, - ) - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
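Note: each stub guards its debug output behind CLIENT_LOGGING_SUPPORTED and a DEBUG-level check, and attaches serviceName, rpcName, and the httpRequest/httpResponse dictionaries through the logging extra mapping, so they appear as attributes on the emitted LogRecord. A sketch of a handler that surfaces the rpcName; the "google.cloud" logger name is an assumption about where the generated module loggers live:

import logging

class RpcNameFilter(logging.Filter):
    """Prefix records that carry an rpcName attribute (set via `extra=`)."""

    def filter(self, record: logging.LogRecord) -> bool:
        rpc = getattr(record, "rpcName", None)
        if rpc is not None:
            record.msg = f"[{rpc}] {record.msg}"
        return True

handler = logging.StreamHandler()
handler.addFilter(RpcNameFilter())
logger = logging.getLogger("google.cloud")  # assumed parent of the generated loggers
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)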
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_update_app_profile(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_update_app_profile_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = json_format.MessageToJson(resp) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.update_app_profile", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "UpdateAppProfile", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _UpdateCluster( - _BaseBigtableInstanceAdminRestTransport._BaseUpdateCluster, - BigtableInstanceAdminRestStub, - ): - def __hash__(self): - return hash("BigtableInstanceAdminRestTransport.UpdateCluster") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - return response - - def __call__( - self, - request: instance.Cluster, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operations_pb2.Operation: - r"""Call the update cluster method over HTTP. - - Args: - request (~.instance.Cluster): - The request object. A resizable group of nodes in a particular cloud - location, capable of serving all - [Tables][google.bigtable.admin.v2.Table] in the parent - [Instance][google.bigtable.admin.v2.Instance]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. 
- - """ - - http_options = ( - _BaseBigtableInstanceAdminRestTransport._BaseUpdateCluster._get_http_options() - ) - - request, metadata = self._interceptor.pre_update_cluster(request, metadata) - transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseUpdateCluster._get_transcoded_request( - http_options, request - ) - - body = _BaseBigtableInstanceAdminRestTransport._BaseUpdateCluster._get_request_body_json( - transcoded_request - ) - - # Jsonify the query params - query_params = _BaseBigtableInstanceAdminRestTransport._BaseUpdateCluster._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = json_format.MessageToJson(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.UpdateCluster", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "UpdateCluster", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = BigtableInstanceAdminRestTransport._UpdateCluster._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_update_cluster(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_update_cluster_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = json_format.MessageToJson(resp) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.update_cluster", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "UpdateCluster", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _UpdateInstance( - _BaseBigtableInstanceAdminRestTransport._BaseUpdateInstance, - BigtableInstanceAdminRestStub, - ): - def __hash__(self): - return hash("BigtableInstanceAdminRestTransport.UpdateInstance") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - return response - - def __call__( - self, - request: instance.Instance, - *, - retry: 
OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> instance.Instance: - r"""Call the update instance method over HTTP. - - Args: - request (~.instance.Instance): - The request object. A collection of Bigtable - [Tables][google.bigtable.admin.v2.Table] and the - resources that serve them. All tables in an instance are - served from all - [Clusters][google.bigtable.admin.v2.Cluster] in the - instance. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.instance.Instance: - A collection of Bigtable - [Tables][google.bigtable.admin.v2.Table] and the - resources that serve them. All tables in an instance are - served from all - [Clusters][google.bigtable.admin.v2.Cluster] in the - instance. - - """ - - http_options = ( - _BaseBigtableInstanceAdminRestTransport._BaseUpdateInstance._get_http_options() - ) - - request, metadata = self._interceptor.pre_update_instance(request, metadata) - transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseUpdateInstance._get_transcoded_request( - http_options, request - ) - - body = _BaseBigtableInstanceAdminRestTransport._BaseUpdateInstance._get_request_body_json( - transcoded_request - ) - - # Jsonify the query params - query_params = _BaseBigtableInstanceAdminRestTransport._BaseUpdateInstance._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = type(request).to_json(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.UpdateInstance", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "UpdateInstance", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = BigtableInstanceAdminRestTransport._UpdateInstance._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
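Note: every call site above wraps the request and response in pre_*/post_* interceptor hooks (pre_update_instance and post_update_instance in this stub). A sketch of overriding those hooks; the base-class name BigtableInstanceAdminRestInterceptor and its import path are assumptions based on this transport's file layout, since the class itself is defined outside the lines shown here:

# Assumed interceptor base class; the hook names mirror the pre_/post_ calls above.
from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.rest import (
    BigtableInstanceAdminRestInterceptor,  # assumption: generated alongside these stubs
)

class AuditInterceptor(BigtableInstanceAdminRestInterceptor):
    def pre_update_instance(self, request, metadata):
        # Inspect or adjust the request/metadata before transcoding.
        print("updating", request.name)
        return request, metadata

    def post_update_instance(self, response):
        # Inspect or adjust the parsed Instance before it is returned.
        print("updated", response.name)
        return response

The interceptor would be handed to the REST transport's constructor (interceptor=AuditInterceptor()); that keyword is likewise an assumption about the generated constructor signature.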
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = instance.Instance() - pb_resp = instance.Instance.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_update_instance(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_update_instance_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = instance.Instance.to_json(response) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.update_instance", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "UpdateInstance", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _UpdateLogicalView( - _BaseBigtableInstanceAdminRestTransport._BaseUpdateLogicalView, - BigtableInstanceAdminRestStub, - ): - def __hash__(self): - return hash("BigtableInstanceAdminRestTransport.UpdateLogicalView") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - return response - - def __call__( - self, - request: bigtable_instance_admin.UpdateLogicalViewRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operations_pb2.Operation: - r"""Call the update logical view method over HTTP. - - Args: - request (~.bigtable_instance_admin.UpdateLogicalViewRequest): - The request object. Request message for - BigtableInstanceAdmin.UpdateLogicalView. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. 
- - """ - - http_options = ( - _BaseBigtableInstanceAdminRestTransport._BaseUpdateLogicalView._get_http_options() - ) - - request, metadata = self._interceptor.pre_update_logical_view( - request, metadata - ) - transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseUpdateLogicalView._get_transcoded_request( - http_options, request - ) - - body = _BaseBigtableInstanceAdminRestTransport._BaseUpdateLogicalView._get_request_body_json( - transcoded_request - ) - - # Jsonify the query params - query_params = _BaseBigtableInstanceAdminRestTransport._BaseUpdateLogicalView._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = json_format.MessageToJson(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.UpdateLogicalView", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "UpdateLogicalView", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = ( - BigtableInstanceAdminRestTransport._UpdateLogicalView._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - body, - ) - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_update_logical_view(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_update_logical_view_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = json_format.MessageToJson(resp) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.update_logical_view", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "UpdateLogicalView", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _UpdateMaterializedView( - _BaseBigtableInstanceAdminRestTransport._BaseUpdateMaterializedView, - BigtableInstanceAdminRestStub, - ): - def __hash__(self): - return hash("BigtableInstanceAdminRestTransport.UpdateMaterializedView") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - 
return response - - def __call__( - self, - request: bigtable_instance_admin.UpdateMaterializedViewRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operations_pb2.Operation: - r"""Call the update materialized view method over HTTP. - - Args: - request (~.bigtable_instance_admin.UpdateMaterializedViewRequest): - The request object. Request message for - BigtableInstanceAdmin.UpdateMaterializedView. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. - - """ - - http_options = ( - _BaseBigtableInstanceAdminRestTransport._BaseUpdateMaterializedView._get_http_options() - ) - - request, metadata = self._interceptor.pre_update_materialized_view( - request, metadata - ) - transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseUpdateMaterializedView._get_transcoded_request( - http_options, request - ) - - body = _BaseBigtableInstanceAdminRestTransport._BaseUpdateMaterializedView._get_request_body_json( - transcoded_request - ) - - # Jsonify the query params - query_params = _BaseBigtableInstanceAdminRestTransport._BaseUpdateMaterializedView._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = json_format.MessageToJson(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.UpdateMaterializedView", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "UpdateMaterializedView", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = BigtableInstanceAdminRestTransport._UpdateMaterializedView._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
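Note: the metadata contract repeated in each docstring (string values, except bytes for keys ending in "-bin") looks like this in practice; the header names and values below are illustrative placeholders:

# Key/value metadata pairs sent alongside a request; "-bin" keys carry bytes.
metadata = (
    ("x-goog-request-params", "name=projects/my-project/instances/my-instance"),
    ("x-debug-trace-bin", b"\x0a\x02\x08\x01"),  # illustrative binary header
)
# e.g. client.update_materialized_view(request=..., metadata=metadata)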
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_update_materialized_view(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_update_materialized_view_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = json_format.MessageToJson(resp) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.update_materialized_view", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": "UpdateMaterializedView", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - @property - def create_app_profile( - self, - ) -> Callable[ - [bigtable_instance_admin.CreateAppProfileRequest], instance.AppProfile - ]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._CreateAppProfile(self._session, self._host, self._interceptor) # type: ignore - - @property - def create_cluster( - self, - ) -> Callable[ - [bigtable_instance_admin.CreateClusterRequest], operations_pb2.Operation - ]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._CreateCluster(self._session, self._host, self._interceptor) # type: ignore - - @property - def create_instance( - self, - ) -> Callable[ - [bigtable_instance_admin.CreateInstanceRequest], operations_pb2.Operation - ]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._CreateInstance(self._session, self._host, self._interceptor) # type: ignore - - @property - def create_logical_view( - self, - ) -> Callable[ - [bigtable_instance_admin.CreateLogicalViewRequest], operations_pb2.Operation - ]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._CreateLogicalView(self._session, self._host, self._interceptor) # type: ignore - - @property - def create_materialized_view( - self, - ) -> Callable[ - [bigtable_instance_admin.CreateMaterializedViewRequest], - operations_pb2.Operation, - ]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._CreateMaterializedView(self._session, self._host, self._interceptor) # type: ignore - - @property - def delete_app_profile( - self, - ) -> Callable[[bigtable_instance_admin.DeleteAppProfileRequest], empty_pb2.Empty]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
- # In C++ this would require a dynamic_cast - return self._DeleteAppProfile(self._session, self._host, self._interceptor) # type: ignore - - @property - def delete_cluster( - self, - ) -> Callable[[bigtable_instance_admin.DeleteClusterRequest], empty_pb2.Empty]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._DeleteCluster(self._session, self._host, self._interceptor) # type: ignore - - @property - def delete_instance( - self, - ) -> Callable[[bigtable_instance_admin.DeleteInstanceRequest], empty_pb2.Empty]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._DeleteInstance(self._session, self._host, self._interceptor) # type: ignore - - @property - def delete_logical_view( - self, - ) -> Callable[[bigtable_instance_admin.DeleteLogicalViewRequest], empty_pb2.Empty]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._DeleteLogicalView(self._session, self._host, self._interceptor) # type: ignore - - @property - def delete_materialized_view( - self, - ) -> Callable[ - [bigtable_instance_admin.DeleteMaterializedViewRequest], empty_pb2.Empty - ]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._DeleteMaterializedView(self._session, self._host, self._interceptor) # type: ignore - - @property - def get_app_profile( - self, - ) -> Callable[[bigtable_instance_admin.GetAppProfileRequest], instance.AppProfile]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._GetAppProfile(self._session, self._host, self._interceptor) # type: ignore - - @property - def get_cluster( - self, - ) -> Callable[[bigtable_instance_admin.GetClusterRequest], instance.Cluster]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._GetCluster(self._session, self._host, self._interceptor) # type: ignore - - @property - def get_iam_policy( - self, - ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._GetIamPolicy(self._session, self._host, self._interceptor) # type: ignore - - @property - def get_instance( - self, - ) -> Callable[[bigtable_instance_admin.GetInstanceRequest], instance.Instance]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._GetInstance(self._session, self._host, self._interceptor) # type: ignore - - @property - def get_logical_view( - self, - ) -> Callable[ - [bigtable_instance_admin.GetLogicalViewRequest], instance.LogicalView - ]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
- # In C++ this would require a dynamic_cast - return self._GetLogicalView(self._session, self._host, self._interceptor) # type: ignore - - @property - def get_materialized_view( - self, - ) -> Callable[ - [bigtable_instance_admin.GetMaterializedViewRequest], instance.MaterializedView - ]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._GetMaterializedView(self._session, self._host, self._interceptor) # type: ignore - - @property - def list_app_profiles( - self, - ) -> Callable[ - [bigtable_instance_admin.ListAppProfilesRequest], - bigtable_instance_admin.ListAppProfilesResponse, - ]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._ListAppProfiles(self._session, self._host, self._interceptor) # type: ignore - - @property - def list_clusters( - self, - ) -> Callable[ - [bigtable_instance_admin.ListClustersRequest], - bigtable_instance_admin.ListClustersResponse, - ]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._ListClusters(self._session, self._host, self._interceptor) # type: ignore - - @property - def list_hot_tablets( - self, - ) -> Callable[ - [bigtable_instance_admin.ListHotTabletsRequest], - bigtable_instance_admin.ListHotTabletsResponse, - ]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._ListHotTablets(self._session, self._host, self._interceptor) # type: ignore - - @property - def list_instances( - self, - ) -> Callable[ - [bigtable_instance_admin.ListInstancesRequest], - bigtable_instance_admin.ListInstancesResponse, - ]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._ListInstances(self._session, self._host, self._interceptor) # type: ignore - - @property - def list_logical_views( - self, - ) -> Callable[ - [bigtable_instance_admin.ListLogicalViewsRequest], - bigtable_instance_admin.ListLogicalViewsResponse, - ]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._ListLogicalViews(self._session, self._host, self._interceptor) # type: ignore - - @property - def list_materialized_views( - self, - ) -> Callable[ - [bigtable_instance_admin.ListMaterializedViewsRequest], - bigtable_instance_admin.ListMaterializedViewsResponse, - ]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._ListMaterializedViews(self._session, self._host, self._interceptor) # type: ignore - - @property - def partial_update_cluster( - self, - ) -> Callable[ - [bigtable_instance_admin.PartialUpdateClusterRequest], operations_pb2.Operation - ]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
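Each of these properties hands back a per-RPC callable object rather than a plain bound method, which is why the generated code repeats the mypy note and a "# type: ignore" on every return. A stripped-down illustration of that pattern follows; the class and method names are purely hypothetical, not taken from the deleted transport.

from typing import Callable


class _EchoStub:
    """Stand-in for a generated per-RPC wrapper such as _GetLogicalView."""

    def __init__(self, host: str):
        self._host = host

    def __call__(self, request: str) -> str:
        # A real stub would transcode the request and issue an HTTP call;
        # this one only echoes, to keep the sketch self-contained.
        return f"{self._host} received {request!r}"


class DemoRestTransport:
    def __init__(self, host: str = "example.googleapis.com"):
        self._host = host

    @property
    def echo(self) -> Callable[[str], str]:
        # mypy cannot prove that _EchoStub satisfies the declared Callable
        # signature, hence the "# type: ignore" on lines like the ones above.
        return _EchoStub(self._host)  # type: ignore


print(DemoRestTransport().echo("ping"))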
- # In C++ this would require a dynamic_cast - return self._PartialUpdateCluster(self._session, self._host, self._interceptor) # type: ignore - - @property - def partial_update_instance( - self, - ) -> Callable[ - [bigtable_instance_admin.PartialUpdateInstanceRequest], operations_pb2.Operation - ]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._PartialUpdateInstance(self._session, self._host, self._interceptor) # type: ignore - - @property - def set_iam_policy( - self, - ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._SetIamPolicy(self._session, self._host, self._interceptor) # type: ignore - - @property - def test_iam_permissions( - self, - ) -> Callable[ - [iam_policy_pb2.TestIamPermissionsRequest], - iam_policy_pb2.TestIamPermissionsResponse, - ]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._TestIamPermissions(self._session, self._host, self._interceptor) # type: ignore - - @property - def update_app_profile( - self, - ) -> Callable[ - [bigtable_instance_admin.UpdateAppProfileRequest], operations_pb2.Operation - ]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._UpdateAppProfile(self._session, self._host, self._interceptor) # type: ignore - - @property - def update_cluster(self) -> Callable[[instance.Cluster], operations_pb2.Operation]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._UpdateCluster(self._session, self._host, self._interceptor) # type: ignore - - @property - def update_instance(self) -> Callable[[instance.Instance], instance.Instance]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._UpdateInstance(self._session, self._host, self._interceptor) # type: ignore - - @property - def update_logical_view( - self, - ) -> Callable[ - [bigtable_instance_admin.UpdateLogicalViewRequest], operations_pb2.Operation - ]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._UpdateLogicalView(self._session, self._host, self._interceptor) # type: ignore - - @property - def update_materialized_view( - self, - ) -> Callable[ - [bigtable_instance_admin.UpdateMaterializedViewRequest], - operations_pb2.Operation, - ]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
- # In C++ this would require a dynamic_cast - return self._UpdateMaterializedView(self._session, self._host, self._interceptor) # type: ignore - - @property - def kind(self) -> str: - return "rest" - - def close(self): - self._session.close() - - -__all__ = ("BigtableInstanceAdminRestTransport",) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest_base.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest_base.py deleted file mode 100644 index 9855756b8..000000000 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest_base.py +++ /dev/null @@ -1,1746 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import json # type: ignore -from google.api_core import path_template -from google.api_core import gapic_v1 - -from google.protobuf import json_format -from .base import BigtableInstanceAdminTransport, DEFAULT_CLIENT_INFO - -import re -from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union - - -from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin -from google.cloud.bigtable_admin_v2.types import instance -from google.iam.v1 import iam_policy_pb2 # type: ignore -from google.iam.v1 import policy_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.longrunning import operations_pb2 # type: ignore - - -class _BaseBigtableInstanceAdminRestTransport(BigtableInstanceAdminTransport): - """Base REST backend transport for BigtableInstanceAdmin. - - Note: This class is not meant to be used directly. Use its sync and - async sub-classes instead. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends JSON representations of protocol buffers over HTTP/1.1 - """ - - def __init__( - self, - *, - host: str = "bigtableadmin.googleapis.com", - credentials: Optional[Any] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - url_scheme: str = "https", - api_audience: Optional[str] = None, - ) -> None: - """Instantiate the transport. - Args: - host (Optional[str]): - The hostname to connect to (default: 'bigtableadmin.googleapis.com'). - credentials (Optional[Any]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. 
- url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. - """ - # Run the base constructor - maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host) - if maybe_url_match is None: - raise ValueError( - f"Unexpected hostname structure: {host}" - ) # pragma: NO COVER - - url_match_items = maybe_url_match.groupdict() - - host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host - - super().__init__( - host=host, - credentials=credentials, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - api_audience=api_audience, - ) - - class _BaseCreateAppProfile: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - "appProfileId": "", - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{parent=projects/*/instances/*}/appProfiles", - "body": "app_profile", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_instance_admin.CreateAppProfileRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_request_body_json(transcoded_request): - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True - ) - return body - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableInstanceAdminRestTransport._BaseCreateAppProfile._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseCreateCluster: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - "clusterId": "", - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{parent=projects/*/instances/*}/clusters", - "body": "cluster", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_instance_admin.CreateClusterRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_request_body_json(transcoded_request): - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True - ) - return body - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableInstanceAdminRestTransport._BaseCreateCluster._get_unset_required_fields( - query_params - ) - ) - ) - 
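Every _Base* helper above ends its _get_query_params_json the same way: take the fields left over after URI transcoding, merge in defaults for any required field the caller left unset, and pin the wire format with "$alt". A dependency-free sketch of that assembly step follows; the field names are illustrative, not quoted from any one RPC.

from typing import Any, Dict

REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {"appProfileId": ""}


def build_query_params(leftover_fields: Dict[str, Any]) -> Dict[str, Any]:
    query_params = dict(leftover_fields)
    # Add a default for every required field the caller did not set.
    for key, default in REQUIRED_FIELDS_DEFAULT_VALUES.items():
        query_params.setdefault(key, default)
    # Ask the server for JSON with enums encoded as integers.
    query_params["$alt"] = "json;enum-encoding=int"
    return query_params


print(build_query_params({"ignoreWarnings": True}))
# {'ignoreWarnings': True, 'appProfileId': '', '$alt': 'json;enum-encoding=int'}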
- query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseCreateInstance: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{parent=projects/*}/instances", - "body": "*", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_instance_admin.CreateInstanceRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_request_body_json(transcoded_request): - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True - ) - return body - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableInstanceAdminRestTransport._BaseCreateInstance._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseCreateLogicalView: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - "logicalViewId": "", - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{parent=projects/*/instances/*}/logicalViews", - "body": "logical_view", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_instance_admin.CreateLogicalViewRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_request_body_json(transcoded_request): - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True - ) - return body - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableInstanceAdminRestTransport._BaseCreateLogicalView._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseCreateMaterializedView: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - "materializedViewId": "", - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": 
"/v2/{parent=projects/*/instances/*}/materializedViews", - "body": "materialized_view", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_instance_admin.CreateMaterializedViewRequest.pb( - request - ) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_request_body_json(transcoded_request): - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True - ) - return body - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableInstanceAdminRestTransport._BaseCreateMaterializedView._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseDeleteAppProfile: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - "ignoreWarnings": False, - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "delete", - "uri": "/v2/{name=projects/*/instances/*/appProfiles/*}", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_instance_admin.DeleteAppProfileRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableInstanceAdminRestTransport._BaseDeleteAppProfile._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseDeleteCluster: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "delete", - "uri": "/v2/{name=projects/*/instances/*/clusters/*}", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_instance_admin.DeleteClusterRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableInstanceAdminRestTransport._BaseDeleteCluster._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseDeleteInstance: - def 
__hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "delete", - "uri": "/v2/{name=projects/*/instances/*}", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_instance_admin.DeleteInstanceRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableInstanceAdminRestTransport._BaseDeleteInstance._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseDeleteLogicalView: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "delete", - "uri": "/v2/{name=projects/*/instances/*/logicalViews/*}", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_instance_admin.DeleteLogicalViewRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableInstanceAdminRestTransport._BaseDeleteLogicalView._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseDeleteMaterializedView: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "delete", - "uri": "/v2/{name=projects/*/instances/*/materializedViews/*}", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_instance_admin.DeleteMaterializedViewRequest.pb( - request - ) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - 
_BaseBigtableInstanceAdminRestTransport._BaseDeleteMaterializedView._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseGetAppProfile: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{name=projects/*/instances/*/appProfiles/*}", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_instance_admin.GetAppProfileRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableInstanceAdminRestTransport._BaseGetAppProfile._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseGetCluster: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{name=projects/*/instances/*/clusters/*}", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_instance_admin.GetClusterRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableInstanceAdminRestTransport._BaseGetCluster._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseGetIamPolicy: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{resource=projects/*/instances/*}:getIamPolicy", - "body": "*", - }, - { - "method": "post", - "uri": "/v2/{resource=projects/*/instances/*/materializedViews/*}:getIamPolicy", - "body": "*", - }, - { - "method": "post", - "uri": "/v2/{resource=projects/*/instances/*/logicalViews/*}:getIamPolicy", - "body": "*", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request 
= request - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_request_body_json(transcoded_request): - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True - ) - return body - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableInstanceAdminRestTransport._BaseGetIamPolicy._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseGetInstance: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{name=projects/*/instances/*}", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_instance_admin.GetInstanceRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableInstanceAdminRestTransport._BaseGetInstance._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseGetLogicalView: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{name=projects/*/instances/*/logicalViews/*}", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_instance_admin.GetLogicalViewRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableInstanceAdminRestTransport._BaseGetLogicalView._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseGetMaterializedView: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - 
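The _get_transcoded_request helpers above all defer to google.api_core.path_template.transcode, which tries each declared HTTP rule in order and uses the first one whose URI template matches the request, turning unmatched fields into query parameters or the body. A standalone illustration follows, modelled loosely on the multi-binding GetIamPolicy rules rather than copied from them; it assumes google-api-core is installed and the exact output format may vary slightly across versions.

from google.api_core import path_template

http_options = [
    {"method": "post", "uri": "/v2/{resource=projects/*/instances/*}:getIamPolicy", "body": "*"},
    {"method": "post", "uri": "/v2/{resource=projects/*/instances/*/logicalViews/*}:getIamPolicy", "body": "*"},
]

# The resource below has logical-view depth, so the first rule fails to match
# and transcode falls through to the second binding.
request = {"resource": "projects/demo/instances/prod/logicalViews/lv1"}

transcoded = path_template.transcode(http_options, **request)
print(transcoded["method"])  # post
print(transcoded["uri"])
# roughly: /v2/projects/demo/instances/prod/logicalViews/lv1:getIamPolicy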
@staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{name=projects/*/instances/*/materializedViews/*}", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_instance_admin.GetMaterializedViewRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableInstanceAdminRestTransport._BaseGetMaterializedView._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseListAppProfiles: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{parent=projects/*/instances/*}/appProfiles", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_instance_admin.ListAppProfilesRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableInstanceAdminRestTransport._BaseListAppProfiles._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseListClusters: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{parent=projects/*/instances/*}/clusters", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_instance_admin.ListClustersRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableInstanceAdminRestTransport._BaseListClusters._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseListHotTablets: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def 
_get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{parent=projects/*/instances/*/clusters/*}/hotTablets", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_instance_admin.ListHotTabletsRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableInstanceAdminRestTransport._BaseListHotTablets._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseListInstances: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{parent=projects/*}/instances", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_instance_admin.ListInstancesRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableInstanceAdminRestTransport._BaseListInstances._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseListLogicalViews: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{parent=projects/*/instances/*}/logicalViews", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_instance_admin.ListLogicalViewsRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableInstanceAdminRestTransport._BaseListLogicalViews._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseListMaterializedViews: - def __hash__(self): # pragma: NO COVER - 
return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{parent=projects/*/instances/*}/materializedViews", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_instance_admin.ListMaterializedViewsRequest.pb( - request - ) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableInstanceAdminRestTransport._BaseListMaterializedViews._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BasePartialUpdateCluster: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - "updateMask": {}, - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "patch", - "uri": "/v2/{cluster.name=projects/*/instances/*/clusters/*}", - "body": "cluster", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_instance_admin.PartialUpdateClusterRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_request_body_json(transcoded_request): - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True - ) - return body - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableInstanceAdminRestTransport._BasePartialUpdateCluster._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BasePartialUpdateInstance: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - "updateMask": {}, - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "patch", - "uri": "/v2/{instance.name=projects/*/instances/*}", - "body": "instance", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_instance_admin.PartialUpdateInstanceRequest.pb( - request - ) - transcoded_request = path_template.transcode(http_options, pb_request) - return 
transcoded_request - - @staticmethod - def _get_request_body_json(transcoded_request): - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True - ) - return body - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableInstanceAdminRestTransport._BasePartialUpdateInstance._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseSetIamPolicy: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{resource=projects/*/instances/*}:setIamPolicy", - "body": "*", - }, - { - "method": "post", - "uri": "/v2/{resource=projects/*/instances/*/materializedViews/*}:setIamPolicy", - "body": "*", - }, - { - "method": "post", - "uri": "/v2/{resource=projects/*/instances/*/logicalViews/*}:setIamPolicy", - "body": "*", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = request - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_request_body_json(transcoded_request): - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True - ) - return body - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableInstanceAdminRestTransport._BaseSetIamPolicy._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseTestIamPermissions: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{resource=projects/*/instances/*}:testIamPermissions", - "body": "*", - }, - { - "method": "post", - "uri": "/v2/{resource=projects/*/instances/*/materializedViews/*}:testIamPermissions", - "body": "*", - }, - { - "method": "post", - "uri": "/v2/{resource=projects/*/instances/*/logicalViews/*}:testIamPermissions", - "body": "*", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = request - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_request_body_json(transcoded_request): - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True - 
) - return body - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableInstanceAdminRestTransport._BaseTestIamPermissions._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseUpdateAppProfile: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - "updateMask": {}, - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "patch", - "uri": "/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}", - "body": "app_profile", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_instance_admin.UpdateAppProfileRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_request_body_json(transcoded_request): - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True - ) - return body - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableInstanceAdminRestTransport._BaseUpdateAppProfile._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseUpdateCluster: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "put", - "uri": "/v2/{name=projects/*/instances/*/clusters/*}", - "body": "*", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = instance.Cluster.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_request_body_json(transcoded_request): - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True - ) - return body - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseUpdateInstance: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "put", - "uri": "/v2/{name=projects/*/instances/*}", - "body": "*", - }, - ] - return 
http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = instance.Instance.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_request_body_json(transcoded_request): - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True - ) - return body - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableInstanceAdminRestTransport._BaseUpdateInstance._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseUpdateLogicalView: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "patch", - "uri": "/v2/{logical_view.name=projects/*/instances/*/logicalViews/*}", - "body": "logical_view", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_instance_admin.UpdateLogicalViewRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_request_body_json(transcoded_request): - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True - ) - return body - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableInstanceAdminRestTransport._BaseUpdateLogicalView._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseUpdateMaterializedView: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "patch", - "uri": "/v2/{materialized_view.name=projects/*/instances/*/materializedViews/*}", - "body": "materialized_view", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_instance_admin.UpdateMaterializedViewRequest.pb( - request - ) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_request_body_json(transcoded_request): - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True - ) - return body - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - 
json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableInstanceAdminRestTransport._BaseUpdateMaterializedView._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - -__all__ = ("_BaseBigtableInstanceAdminRestTransport",) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py deleted file mode 100644 index c5e8544d6..000000000 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from .client import BaseBigtableTableAdminClient -from .async_client import BaseBigtableTableAdminAsyncClient - -__all__ = ( - "BaseBigtableTableAdminClient", - "BaseBigtableTableAdminAsyncClient", -) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py deleted file mode 100644 index 49ecb3c70..000000000 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ /dev/null @@ -1,4960 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import logging as std_logging -from collections import OrderedDict -import re -from typing import ( - Dict, - Callable, - Mapping, - MutableMapping, - MutableSequence, - Optional, - Sequence, - Tuple, - Type, - Union, -) - -from google.cloud.bigtable_admin_v2 import gapic_version as package_version - -from google.api_core.client_options import ClientOptions -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry_async as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore -import google.protobuf - - -try: - OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore - -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import pagers -from google.cloud.bigtable_admin_v2.types import bigtable_table_admin -from google.cloud.bigtable_admin_v2.types import table -from google.cloud.bigtable_admin_v2.types import table as gba_table -from google.cloud.bigtable_admin_v2.types import types -from google.iam.v1 import iam_policy_pb2 # type: ignore -from google.iam.v1 import policy_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import BigtableTableAdminTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import BigtableTableAdminGrpcAsyncIOTransport -from .client import BaseBigtableTableAdminClient - -try: - from google.api_core import client_logging # type: ignore - - CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER -except ImportError: # pragma: NO COVER - CLIENT_LOGGING_SUPPORTED = False - -_LOGGER = std_logging.getLogger(__name__) - - -class BaseBigtableTableAdminAsyncClient: - """Service for creating, configuring, and deleting Cloud - Bigtable tables. - - Provides access to the table schemas only, not the data stored - within the tables. - """ - - _client: BaseBigtableTableAdminClient - - # Copy defaults from the synchronous client for use here. - # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. 
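The deprecation note above is the reason the client carries both _DEFAULT_ENDPOINT_TEMPLATE and _DEFAULT_UNIVERSE: the service endpoint is now derived from the configured universe domain rather than being a fixed constant. A hedged sketch of that derivation follows; the template literal mirrors the usual "bigtableadmin.{UNIVERSE_DOMAIN}" pattern but is not quoted from the deleted file.

_DEFAULT_ENDPOINT_TEMPLATE = "bigtableadmin.{UNIVERSE_DOMAIN}"  # illustrative value
_DEFAULT_UNIVERSE = "googleapis.com"


def resolve_endpoint(universe_domain: str = _DEFAULT_UNIVERSE) -> str:
    # Substitute the universe domain into the template to get the API host.
    return _DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=universe_domain)


print(resolve_endpoint())               # bigtableadmin.googleapis.com
print(resolve_endpoint("example.com"))  # bigtableadmin.example.com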
- DEFAULT_ENDPOINT = BaseBigtableTableAdminClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = BaseBigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT - _DEFAULT_ENDPOINT_TEMPLATE = BaseBigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE - _DEFAULT_UNIVERSE = BaseBigtableTableAdminClient._DEFAULT_UNIVERSE - - authorized_view_path = staticmethod( - BaseBigtableTableAdminClient.authorized_view_path - ) - parse_authorized_view_path = staticmethod( - BaseBigtableTableAdminClient.parse_authorized_view_path - ) - backup_path = staticmethod(BaseBigtableTableAdminClient.backup_path) - parse_backup_path = staticmethod(BaseBigtableTableAdminClient.parse_backup_path) - cluster_path = staticmethod(BaseBigtableTableAdminClient.cluster_path) - parse_cluster_path = staticmethod(BaseBigtableTableAdminClient.parse_cluster_path) - crypto_key_version_path = staticmethod( - BaseBigtableTableAdminClient.crypto_key_version_path - ) - parse_crypto_key_version_path = staticmethod( - BaseBigtableTableAdminClient.parse_crypto_key_version_path - ) - instance_path = staticmethod(BaseBigtableTableAdminClient.instance_path) - parse_instance_path = staticmethod(BaseBigtableTableAdminClient.parse_instance_path) - schema_bundle_path = staticmethod(BaseBigtableTableAdminClient.schema_bundle_path) - parse_schema_bundle_path = staticmethod( - BaseBigtableTableAdminClient.parse_schema_bundle_path - ) - snapshot_path = staticmethod(BaseBigtableTableAdminClient.snapshot_path) - parse_snapshot_path = staticmethod(BaseBigtableTableAdminClient.parse_snapshot_path) - table_path = staticmethod(BaseBigtableTableAdminClient.table_path) - parse_table_path = staticmethod(BaseBigtableTableAdminClient.parse_table_path) - common_billing_account_path = staticmethod( - BaseBigtableTableAdminClient.common_billing_account_path - ) - parse_common_billing_account_path = staticmethod( - BaseBigtableTableAdminClient.parse_common_billing_account_path - ) - common_folder_path = staticmethod(BaseBigtableTableAdminClient.common_folder_path) - parse_common_folder_path = staticmethod( - BaseBigtableTableAdminClient.parse_common_folder_path - ) - common_organization_path = staticmethod( - BaseBigtableTableAdminClient.common_organization_path - ) - parse_common_organization_path = staticmethod( - BaseBigtableTableAdminClient.parse_common_organization_path - ) - common_project_path = staticmethod(BaseBigtableTableAdminClient.common_project_path) - parse_common_project_path = staticmethod( - BaseBigtableTableAdminClient.parse_common_project_path - ) - common_location_path = staticmethod( - BaseBigtableTableAdminClient.common_location_path - ) - parse_common_location_path = staticmethod( - BaseBigtableTableAdminClient.parse_common_location_path - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - BaseBigtableTableAdminAsyncClient: The constructed client. - """ - return BaseBigtableTableAdminClient.from_service_account_info.__func__(BaseBigtableTableAdminAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. 
- args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - BaseBigtableTableAdminAsyncClient: The constructed client. - """ - return BaseBigtableTableAdminClient.from_service_account_file.__func__(BaseBigtableTableAdminAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @classmethod - def get_mtls_endpoint_and_cert_source( - cls, client_options: Optional[ClientOptions] = None - ): - """Return the API endpoint and client cert source for mutual TLS. - - The client cert source is determined in the following order: - (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the - client cert source is None. - (2) if `client_options.client_cert_source` is provided, use the provided one; if the - default client cert source exists, use the default one; otherwise the client cert - source is None. - - The API endpoint is determined in the following order: - (1) if `client_options.api_endpoint` if provided, use the provided one. - (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the - default mTLS endpoint; if the environment variable is "never", use the default API - endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise - use the default API endpoint. - - More details can be found at https://google.aip.dev/auth/4114. - - Args: - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. Only the `api_endpoint` and `client_cert_source` properties may be used - in this method. - - Returns: - Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the - client cert source to use. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If any errors happen. - """ - return BaseBigtableTableAdminClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore - - @property - def transport(self) -> BigtableTableAdminTransport: - """Returns the transport used by the client instance. - - Returns: - BigtableTableAdminTransport: The transport used by the client instance. - """ - return self._client.transport - - @property - def api_endpoint(self): - """Return the API endpoint used by the client instance. - - Returns: - str: The API endpoint used by the client instance. - """ - return self._client._api_endpoint - - @property - def universe_domain(self) -> str: - """Return the universe domain used by the client instance. - - Returns: - str: The universe domain used - by the client instance. - """ - return self._client._universe_domain - - get_transport_class = BaseBigtableTableAdminClient.get_transport_class - - def __init__( - self, - *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Optional[ - Union[ - str, - BigtableTableAdminTransport, - Callable[..., BigtableTableAdminTransport], - ] - ] = "grpc_asyncio", - client_options: Optional[ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the base bigtable table admin async client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. 
- transport (Optional[Union[str,BigtableTableAdminTransport,Callable[..., BigtableTableAdminTransport]]]): - The transport to use, or a Callable that constructs and returns a new transport to use. - If a Callable is given, it will be called with the same set of initialization - arguments as used in the BigtableTableAdminTransport constructor. - If set to None, a transport is chosen automatically. - client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): - Custom options for the client. - - 1. The ``api_endpoint`` property can be used to override the - default endpoint provided by the client when ``transport`` is - not explicitly provided. Only if this property is not set and - ``transport`` was not explicitly provided, the endpoint is - determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment - variable, which have one of the following values: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto-switch to the - default mTLS endpoint if client certificate is present; this is - the default value). - - 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide a client certificate for mTLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - 3. The ``universe_domain`` property can be used to override the - default "googleapis.com" universe. Note that ``api_endpoint`` - property still takes precedence; and ``universe_domain`` is - currently not supported for mTLS. - - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = BaseBigtableTableAdminClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - std_logging.DEBUG - ): # pragma: NO COVER - _LOGGER.debug( - "Created client `google.bigtable.admin_v2.BaseBigtableTableAdminAsyncClient`.", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "universeDomain": getattr( - self._client._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._client._transport, "_credentials") - else { - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "credentialsType": None, - }, - ) - - async def create_table( - self, - request: Optional[Union[bigtable_table_admin.CreateTableRequest, dict]] = None, - *, - parent: Optional[str] = None, - table_id: Optional[str] = None, - table: Optional[gba_table.Table] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> gba_table.Table: - r"""Creates a new table in the specified instance. 
- The table can be created with a full set of initial - column families, specified in the request. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_create_table(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.CreateTableRequest( - parent="parent_value", - table_id="table_id_value", - ) - - # Make the request - response = await client.create_table(request=request) - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateTableRequest, dict]]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] - parent (:class:`str`): - Required. The unique name of the instance in which to - create the table. Values are of the form - ``projects/{project}/instances/{instance}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - table_id (:class:`str`): - Required. The name by which the new table should be - referred to within the parent instance, e.g., ``foobar`` - rather than ``{parent}/tables/foobar``. Maximum 50 - characters. - - This corresponds to the ``table_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - table (:class:`google.cloud.bigtable_admin_v2.types.Table`): - Required. The Table to create. - This corresponds to the ``table`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.types.Table: - A collection of user data indexed by - row, column, and timestamp. Each table - is served using the resources of its - parent cluster. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [parent, table_id, table] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. 
- if not isinstance(request, bigtable_table_admin.CreateTableRequest): - request = bigtable_table_admin.CreateTableRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if table_id is not None: - request.table_id = table_id - if table is not None: - request.table = table - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.create_table - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def create_table_from_snapshot( - self, - request: Optional[ - Union[bigtable_table_admin.CreateTableFromSnapshotRequest, dict] - ] = None, - *, - parent: Optional[str] = None, - table_id: Optional[str] = None, - source_snapshot: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operation_async.AsyncOperation: - r"""Creates a new table from the specified snapshot. The - target table must not exist. The snapshot and the table - must be in the same instance. - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_create_table_from_snapshot(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.CreateTableFromSnapshotRequest( - parent="parent_value", - table_id="table_id_value", - source_snapshot="source_snapshot_value", - ) - - # Make the request - operation = client.create_table_from_snapshot(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateTableFromSnapshotRequest, dict]]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot] - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. 
It is not subject to any - SLA or deprecation policy. - parent (:class:`str`): - Required. The unique name of the instance in which to - create the table. Values are of the form - ``projects/{project}/instances/{instance}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - table_id (:class:`str`): - Required. The name by which the new table should be - referred to within the parent instance, e.g., ``foobar`` - rather than ``{parent}/tables/foobar``. - - This corresponds to the ``table_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - source_snapshot (:class:`str`): - Required. The unique name of the snapshot from which to - restore the table. The snapshot and the table must be in - the same instance. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. - - This corresponds to the ``source_snapshot`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table` A collection of user data indexed by row, column, and timestamp. - Each table is served using the resources of its - parent cluster. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [parent, table_id, source_snapshot] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.CreateTableFromSnapshotRequest): - request = bigtable_table_admin.CreateTableFromSnapshotRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if table_id is not None: - request.table_id = table_id - if source_snapshot is not None: - request.source_snapshot = source_snapshot - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.create_table_from_snapshot - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. 
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - table.Table, - metadata_type=bigtable_table_admin.CreateTableFromSnapshotMetadata, - ) - - # Done; return the response. - return response - - async def list_tables( - self, - request: Optional[Union[bigtable_table_admin.ListTablesRequest, dict]] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> pagers.ListTablesAsyncPager: - r"""Lists all tables served from a specified instance. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_list_tables(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.ListTablesRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_tables(request=request) - - # Handle the response - async for response in page_result: - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListTablesRequest, dict]]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] - parent (:class:`str`): - Required. The unique name of the instance for which - tables should be listed. Values are of the form - ``projects/{project}/instances/{instance}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListTablesAsyncPager: - Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [parent] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." 
- ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.ListTablesRequest): - request = bigtable_table_admin.ListTablesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.list_tables - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListTablesAsyncPager( - method=rpc, - request=request, - response=response, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_table( - self, - request: Optional[Union[bigtable_table_admin.GetTableRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> table.Table: - r"""Gets metadata information about the specified table. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_get_table(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.GetTableRequest( - name="name_value", - ) - - # Make the request - response = await client.get_table(request=request) - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetTableRequest, dict]]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] - name (:class:`str`): - Required. The unique name of the requested table. Values - are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. 
- - Returns: - google.cloud.bigtable_admin_v2.types.Table: - A collection of user data indexed by - row, column, and timestamp. Each table - is served using the resources of its - parent cluster. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [name] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.GetTableRequest): - request = bigtable_table_admin.GetTableRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.get_table - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_table( - self, - request: Optional[Union[bigtable_table_admin.UpdateTableRequest, dict]] = None, - *, - table: Optional[gba_table.Table] = None, - update_mask: Optional[field_mask_pb2.FieldMask] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operation_async.AsyncOperation: - r"""Updates a specified table. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_update_table(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.UpdateTableRequest( - ) - - # Make the request - operation = client.update_table(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.UpdateTableRequest, dict]]): - The request object. The request for - [UpdateTable][google.bigtable.admin.v2.BigtableTableAdmin.UpdateTable]. - table (:class:`google.cloud.bigtable_admin_v2.types.Table`): - Required. The table to update. The table's ``name`` - field is used to identify the table to update. 
- - This corresponds to the ``table`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. The list of fields to update. A mask - specifying which fields (e.g. ``change_stream_config``) - in the ``table`` field should be updated. This mask is - relative to the ``table`` field, not to the request - message. The wildcard (*) path is currently not - supported. Currently UpdateTable is only supported for - the following fields: - - - ``change_stream_config`` - - ``change_stream_config.retention_period`` - - ``deletion_protection`` - - ``row_key_schema`` - - If ``column_families`` is set in ``update_mask``, it - will return an UNIMPLEMENTED error. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table` A collection of user data indexed by row, column, and timestamp. - Each table is served using the resources of its - parent cluster. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [table, update_mask] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.UpdateTableRequest): - request = bigtable_table_admin.UpdateTableRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if table is not None: - request.table = table - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.update_table - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("table.name", request.table.name),) - ), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gba_table.Table, - metadata_type=bigtable_table_admin.UpdateTableMetadata, - ) - - # Done; return the response. 
- return response - - async def delete_table( - self, - request: Optional[Union[bigtable_table_admin.DeleteTableRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> None: - r"""Permanently deletes a specified table and all of its - data. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_delete_table(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.DeleteTableRequest( - name="name_value", - ) - - # Make the request - await client.delete_table(request=request) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteTableRequest, dict]]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] - name (:class:`str`): - Required. The unique name of the table to be deleted. - Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [name] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.DeleteTableRequest): - request = bigtable_table_admin.DeleteTableRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.delete_table - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. 
- self._client._validate_universe_domain() - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - async def undelete_table( - self, - request: Optional[ - Union[bigtable_table_admin.UndeleteTableRequest, dict] - ] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operation_async.AsyncOperation: - r"""Restores a specified table which was accidentally - deleted. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_undelete_table(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.UndeleteTableRequest( - name="name_value", - ) - - # Make the request - operation = client.undelete_table(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.UndeleteTableRequest, dict]]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable] - name (:class:`str`): - Required. The unique name of the table to be restored. - Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table` A collection of user data indexed by row, column, and timestamp. - Each table is served using the resources of its - parent cluster. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [name] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. 
- if not isinstance(request, bigtable_table_admin.UndeleteTableRequest): - request = bigtable_table_admin.UndeleteTableRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.undelete_table - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - table.Table, - metadata_type=bigtable_table_admin.UndeleteTableMetadata, - ) - - # Done; return the response. - return response - - async def create_authorized_view( - self, - request: Optional[ - Union[bigtable_table_admin.CreateAuthorizedViewRequest, dict] - ] = None, - *, - parent: Optional[str] = None, - authorized_view: Optional[table.AuthorizedView] = None, - authorized_view_id: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operation_async.AsyncOperation: - r"""Creates a new AuthorizedView in a table. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_create_authorized_view(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.CreateAuthorizedViewRequest( - parent="parent_value", - authorized_view_id="authorized_view_id_value", - ) - - # Make the request - operation = client.create_authorized_view(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateAuthorizedViewRequest, dict]]): - The request object. The request for - [CreateAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.CreateAuthorizedView] - parent (:class:`str`): - Required. This is the name of the table the - AuthorizedView belongs to. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - authorized_view (:class:`google.cloud.bigtable_admin_v2.types.AuthorizedView`): - Required. The AuthorizedView to - create. - - This corresponds to the ``authorized_view`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - authorized_view_id (:class:`str`): - Required. 
The id of the AuthorizedView to create. This - AuthorizedView must not already exist. The - ``authorized_view_id`` appended to ``parent`` forms the - full AuthorizedView name of the form - ``projects/{project}/instances/{instance}/tables/{table}/authorizedView/{authorized_view}``. - - This corresponds to the ``authorized_view_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.AuthorizedView` AuthorizedViews represent subsets of a particular Cloud Bigtable table. Users - can configure access to each Authorized View - independently from the table and use the existing - Data APIs to access the subset of data. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [parent, authorized_view, authorized_view_id] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.CreateAuthorizedViewRequest): - request = bigtable_table_admin.CreateAuthorizedViewRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if authorized_view is not None: - request.authorized_view = authorized_view - if authorized_view_id is not None: - request.authorized_view_id = authorized_view_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.create_authorized_view - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - table.AuthorizedView, - metadata_type=bigtable_table_admin.CreateAuthorizedViewMetadata, - ) - - # Done; return the response. 
- return response - - async def list_authorized_views( - self, - request: Optional[ - Union[bigtable_table_admin.ListAuthorizedViewsRequest, dict] - ] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> pagers.ListAuthorizedViewsAsyncPager: - r"""Lists all AuthorizedViews from a specific table. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_list_authorized_views(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.ListAuthorizedViewsRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_authorized_views(request=request) - - # Handle the response - async for response in page_result: - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListAuthorizedViewsRequest, dict]]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews][google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews] - parent (:class:`str`): - Required. The unique name of the table for which - AuthorizedViews should be listed. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListAuthorizedViewsAsyncPager: - Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews][google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [parent] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. 
- if not isinstance(request, bigtable_table_admin.ListAuthorizedViewsRequest): - request = bigtable_table_admin.ListAuthorizedViewsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.list_authorized_views - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListAuthorizedViewsAsyncPager( - method=rpc, - request=request, - response=response, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_authorized_view( - self, - request: Optional[ - Union[bigtable_table_admin.GetAuthorizedViewRequest, dict] - ] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> table.AuthorizedView: - r"""Gets information from a specified AuthorizedView. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_get_authorized_view(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.GetAuthorizedViewRequest( - name="name_value", - ) - - # Make the request - response = await client.get_authorized_view(request=request) - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetAuthorizedViewRequest, dict]]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.GetAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.GetAuthorizedView] - name (:class:`str`): - Required. The unique name of the requested - AuthorizedView. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. 
- - Returns: - google.cloud.bigtable_admin_v2.types.AuthorizedView: - AuthorizedViews represent subsets of - a particular Cloud Bigtable table. Users - can configure access to each Authorized - View independently from the table and - use the existing Data APIs to access the - subset of data. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [name] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.GetAuthorizedViewRequest): - request = bigtable_table_admin.GetAuthorizedViewRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.get_authorized_view - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_authorized_view( - self, - request: Optional[ - Union[bigtable_table_admin.UpdateAuthorizedViewRequest, dict] - ] = None, - *, - authorized_view: Optional[table.AuthorizedView] = None, - update_mask: Optional[field_mask_pb2.FieldMask] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operation_async.AsyncOperation: - r"""Updates an AuthorizedView in a table. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_update_authorized_view(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.UpdateAuthorizedViewRequest( - ) - - # Make the request - operation = client.update_authorized_view(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.UpdateAuthorizedViewRequest, dict]]): - The request object. The request for - [UpdateAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.UpdateAuthorizedView]. 
- authorized_view (:class:`google.cloud.bigtable_admin_v2.types.AuthorizedView`): - Required. The AuthorizedView to update. The ``name`` in - ``authorized_view`` is used to identify the - AuthorizedView. AuthorizedView name must in this format: - ``projects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}``. - - This corresponds to the ``authorized_view`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Optional. The list of fields to update. A mask - specifying which fields in the AuthorizedView resource - should be updated. This mask is relative to the - AuthorizedView resource, not to the request message. A - field will be overwritten if it is in the mask. If - empty, all fields set in the request will be - overwritten. A special value ``*`` means to overwrite - all fields (including fields not set in the request). - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.AuthorizedView` AuthorizedViews represent subsets of a particular Cloud Bigtable table. Users - can configure access to each Authorized View - independently from the table and use the existing - Data APIs to access the subset of data. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [authorized_view, update_mask] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.UpdateAuthorizedViewRequest): - request = bigtable_table_admin.UpdateAuthorizedViewRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if authorized_view is not None: - request.authorized_view = authorized_view - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.update_authorized_view - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("authorized_view.name", request.authorized_view.name),) - ), - ) - - # Validate the universe domain. 
- self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - table.AuthorizedView, - metadata_type=bigtable_table_admin.UpdateAuthorizedViewMetadata, - ) - - # Done; return the response. - return response - - async def delete_authorized_view( - self, - request: Optional[ - Union[bigtable_table_admin.DeleteAuthorizedViewRequest, dict] - ] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> None: - r"""Permanently deletes a specified AuthorizedView. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_delete_authorized_view(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.DeleteAuthorizedViewRequest( - name="name_value", - ) - - # Make the request - await client.delete_authorized_view(request=request) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteAuthorizedViewRequest, dict]]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.DeleteAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.DeleteAuthorizedView] - name (:class:`str`): - Required. The unique name of the AuthorizedView to be - deleted. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [name] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. 
- if not isinstance(request, bigtable_table_admin.DeleteAuthorizedViewRequest): - request = bigtable_table_admin.DeleteAuthorizedViewRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.delete_authorized_view - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - async def _modify_column_families( - self, - request: Optional[ - Union[bigtable_table_admin.ModifyColumnFamiliesRequest, dict] - ] = None, - *, - name: Optional[str] = None, - modifications: Optional[ - MutableSequence[ - bigtable_table_admin.ModifyColumnFamiliesRequest.Modification - ] - ] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> table.Table: - r"""Performs a series of column family modifications on - the specified table. Either all or none of the - modifications will occur before this method returns, but - data requests received prior to that point may see a - table where only some modifications have taken effect. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_modify_column_families(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.ModifyColumnFamiliesRequest( - name="name_value", - ) - - # Make the request - response = await client._modify_column_families(request=request) - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest, dict]]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] - name (:class:`str`): - Required. The unique name of the table whose families - should be modified. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - modifications (:class:`MutableSequence[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest.Modification]`): - Required. Modifications to be - atomically applied to the specified - table's families. Entries are applied in - order, meaning that earlier - modifications can be masked by later - ones (in the case of repeated updates to - the same family, for example). 
- - This corresponds to the ``modifications`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.types.Table: - A collection of user data indexed by - row, column, and timestamp. Each table - is served using the resources of its - parent cluster. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [name, modifications] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.ModifyColumnFamiliesRequest): - request = bigtable_table_admin.ModifyColumnFamiliesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if modifications: - request.modifications.extend(modifications) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.modify_column_families - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def drop_row_range( - self, - request: Optional[Union[bigtable_table_admin.DropRowRangeRequest, dict]] = None, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> None: - r"""Permanently drop/delete a row range from a specified - table. The request can specify whether to delete all - rows in a table, or only those that match a particular - prefix. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_drop_row_range(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.DropRowRangeRequest( - row_key_prefix=b'row_key_prefix_blob', - name="name_value", - ) - - # Make the request - await client.drop_row_range(request=request) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.DropRowRangeRequest, dict]]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - """ - # Create or coerce a protobuf request object. - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.DropRowRangeRequest): - request = bigtable_table_admin.DropRowRangeRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.drop_row_range - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - async def generate_consistency_token( - self, - request: Optional[ - Union[bigtable_table_admin.GenerateConsistencyTokenRequest, dict] - ] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> bigtable_table_admin.GenerateConsistencyTokenResponse: - r"""Generates a consistency token for a Table, which can - be used in CheckConsistency to check whether mutations - to the table that finished before this call started have - been replicated. The tokens will be available for 90 - days. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_generate_consistency_token(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.GenerateConsistencyTokenRequest( - name="name_value", - ) - - # Make the request - response = await client.generate_consistency_token(request=request) - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenRequest, dict]]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] - name (:class:`str`): - Required. The unique name of the Table for which to - create a consistency token. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenResponse: - Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [name] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance( - request, bigtable_table_admin.GenerateConsistencyTokenRequest - ): - request = bigtable_table_admin.GenerateConsistencyTokenRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.generate_consistency_token - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def check_consistency( - self, - request: Optional[ - Union[bigtable_table_admin.CheckConsistencyRequest, dict] - ] = None, - *, - name: Optional[str] = None, - consistency_token: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> bigtable_table_admin.CheckConsistencyResponse: - r"""Checks replication consistency based on a consistency - token, that is, if replication has caught up based on - the conditions specified in the token and the check - request. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_check_consistency(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.CheckConsistencyRequest( - name="name_value", - consistency_token="consistency_token_value", - ) - - # Make the request - response = await client.check_consistency(request=request) - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.CheckConsistencyRequest, dict]]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] - name (:class:`str`): - Required. The unique name of the Table for which to - check replication consistency. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - consistency_token (:class:`str`): - Required. The token created using - GenerateConsistencyToken for the Table. - - This corresponds to the ``consistency_token`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.types.CheckConsistencyResponse: - Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [name, consistency_token] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." 
- ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.CheckConsistencyRequest): - request = bigtable_table_admin.CheckConsistencyRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if consistency_token is not None: - request.consistency_token = consistency_token - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.check_consistency - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def _snapshot_table( - self, - request: Optional[ - Union[bigtable_table_admin.SnapshotTableRequest, dict] - ] = None, - *, - name: Optional[str] = None, - cluster: Optional[str] = None, - snapshot_id: Optional[str] = None, - description: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operation_async.AsyncOperation: - r"""Creates a new snapshot in the specified cluster from - the specified source table. The cluster and the table - must be in the same instance. - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_snapshot_table(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.SnapshotTableRequest( - name="name_value", - cluster="cluster_value", - snapshot_id="snapshot_id_value", - ) - - # Make the request - operation = client._snapshot_table(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.SnapshotTableRequest, dict]]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable] - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. 
This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - name (:class:`str`): - Required. The unique name of the table to have the - snapshot taken. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster (:class:`str`): - Required. The name of the cluster where the snapshot - will be created in. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - - This corresponds to the ``cluster`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - snapshot_id (:class:`str`): - Required. The ID by which the new snapshot should be - referred to within the parent cluster, e.g., - ``mysnapshot`` of the form: - ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` rather than - ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/mysnapshot``. - - This corresponds to the ``snapshot_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - description (:class:`str`): - Description of the snapshot. - This corresponds to the ``description`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Snapshot` A snapshot of a table at a particular time. A snapshot can be used as a - checkpoint for data restoration or a data source for - a new table. - - Note: This is a private alpha release of Cloud - Bigtable snapshots. This feature is not currently - available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible - ways and is not recommended for production use. It is - not subject to any SLA or deprecation policy. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [name, cluster, snapshot_id, description] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.SnapshotTableRequest): - request = bigtable_table_admin.SnapshotTableRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if name is not None: - request.name = name - if cluster is not None: - request.cluster = cluster - if snapshot_id is not None: - request.snapshot_id = snapshot_id - if description is not None: - request.description = description - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.snapshot_table - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - table.Snapshot, - metadata_type=bigtable_table_admin.SnapshotTableMetadata, - ) - - # Done; return the response. - return response - - async def _get_snapshot( - self, - request: Optional[Union[bigtable_table_admin.GetSnapshotRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> table.Snapshot: - r"""Gets metadata information about the specified - snapshot. - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_get_snapshot(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.GetSnapshotRequest( - name="name_value", - ) - - # Make the request - response = await client._get_snapshot(request=request) - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetSnapshotRequest, dict]]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - name (:class:`str`): - Required. The unique name of the requested snapshot. - Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.types.Snapshot: - A snapshot of a table at a particular - time. A snapshot can be used as a - checkpoint for data restoration or a - data source for a new table. - - Note: This is a private alpha release of - Cloud Bigtable snapshots. This feature - is not currently available to most Cloud - Bigtable customers. This feature might - be changed in backward-incompatible ways - and is not recommended for production - use. It is not subject to any SLA or - deprecation policy. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [name] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.GetSnapshotRequest): - request = bigtable_table_admin.GetSnapshotRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.get_snapshot - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def _list_snapshots( - self, - request: Optional[ - Union[bigtable_table_admin.ListSnapshotsRequest, dict] - ] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> pagers.ListSnapshotsAsyncPager: - r"""Lists all snapshots associated with the specified - cluster. - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_list_snapshots(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.ListSnapshotsRequest( - parent="parent_value", - ) - - # Make the request - page_result = client._list_snapshots(request=request) - - # Handle the response - async for response in page_result: - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest, dict]]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - parent (:class:`str`): - Required. The unique name of the cluster for which - snapshots should be listed. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - Use ``{cluster} = '-'`` to list snapshots for all - clusters in an instance, e.g., - ``projects/{project}/instances/{instance}/clusters/-``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListSnapshotsAsyncPager: - Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] - - Note: This is a private alpha release of Cloud - Bigtable snapshots. This feature is not currently - available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible - ways and is not recommended for production use. It is - not subject to any SLA or deprecation policy. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [parent] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. 
- if not isinstance(request, bigtable_table_admin.ListSnapshotsRequest): - request = bigtable_table_admin.ListSnapshotsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.list_snapshots - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListSnapshotsAsyncPager( - method=rpc, - request=request, - response=response, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def _delete_snapshot( - self, - request: Optional[ - Union[bigtable_table_admin.DeleteSnapshotRequest, dict] - ] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> None: - r"""Permanently deletes the specified snapshot. - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_delete_snapshot(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.DeleteSnapshotRequest( - name="name_value", - ) - - # Make the request - await client._delete_snapshot(request=request) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteSnapshotRequest, dict]]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - name (:class:`str`): - Required. The unique name of the snapshot to be deleted. - Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. 
- - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [name] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.DeleteSnapshotRequest): - request = bigtable_table_admin.DeleteSnapshotRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.delete_snapshot - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - async def create_backup( - self, - request: Optional[Union[bigtable_table_admin.CreateBackupRequest, dict]] = None, - *, - parent: Optional[str] = None, - backup_id: Optional[str] = None, - backup: Optional[table.Backup] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operation_async.AsyncOperation: - r"""Starts creating a new Cloud Bigtable Backup. The returned backup - [long-running operation][google.longrunning.Operation] can be - used to track creation of the backup. The - [metadata][google.longrunning.Operation.metadata] field type is - [CreateBackupMetadata][google.bigtable.admin.v2.CreateBackupMetadata]. - The [response][google.longrunning.Operation.response] field type - is [Backup][google.bigtable.admin.v2.Backup], if successful. - Cancelling the returned operation will stop the creation and - delete the backup. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_create_backup(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - backup = bigtable_admin_v2.Backup() - backup.source_table = "source_table_value" - - request = bigtable_admin_v2.CreateBackupRequest( - parent="parent_value", - backup_id="backup_id_value", - backup=backup, - ) - - # Make the request - operation = client.create_backup(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateBackupRequest, dict]]): - The request object. The request for - [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]. - parent (:class:`str`): - Required. This must be one of the clusters in the - instance in which this table is located. The backup will - be stored in this cluster. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - backup_id (:class:`str`): - Required. The id of the backup to be created. The - ``backup_id`` along with the parent ``parent`` are - combined as {parent}/backups/{backup_id} to create the - full backup name, of the form: - ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``. - This string must be between 1 and 50 characters in - length and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*. - - This corresponds to the ``backup_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - backup (:class:`google.cloud.bigtable_admin_v2.types.Backup`): - Required. The backup to create. - This corresponds to the ``backup`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.bigtable_admin_v2.types.Backup` A - backup of a Cloud Bigtable table. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [parent, backup_id, backup] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. 
- if not isinstance(request, bigtable_table_admin.CreateBackupRequest): - request = bigtable_table_admin.CreateBackupRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if backup_id is not None: - request.backup_id = backup_id - if backup is not None: - request.backup = backup - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.create_backup - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - table.Backup, - metadata_type=bigtable_table_admin.CreateBackupMetadata, - ) - - # Done; return the response. - return response - - async def get_backup( - self, - request: Optional[Union[bigtable_table_admin.GetBackupRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> table.Backup: - r"""Gets metadata on a pending or completed Cloud - Bigtable Backup. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_get_backup(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.GetBackupRequest( - name="name_value", - ) - - # Make the request - response = await client.get_backup(request=request) - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetBackupRequest, dict]]): - The request object. The request for - [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. - name (:class:`str`): - Required. Name of the backup. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.types.Backup: - A backup of a Cloud Bigtable table. 
- """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [name] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.GetBackupRequest): - request = bigtable_table_admin.GetBackupRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.get_backup - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_backup( - self, - request: Optional[Union[bigtable_table_admin.UpdateBackupRequest, dict]] = None, - *, - backup: Optional[table.Backup] = None, - update_mask: Optional[field_mask_pb2.FieldMask] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> table.Backup: - r"""Updates a pending or completed Cloud Bigtable Backup. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_update_backup(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - backup = bigtable_admin_v2.Backup() - backup.source_table = "source_table_value" - - request = bigtable_admin_v2.UpdateBackupRequest( - backup=backup, - ) - - # Make the request - response = await client.update_backup(request=request) - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.UpdateBackupRequest, dict]]): - The request object. The request for - [UpdateBackup][google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup]. - backup (:class:`google.cloud.bigtable_admin_v2.types.Backup`): - Required. The backup to update. ``backup.name``, and the - fields to be updated as specified by ``update_mask`` are - required. Other fields are ignored. Update is only - supported for the following fields: - - - ``backup.expire_time``. - - This corresponds to the ``backup`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. A mask specifying which fields (e.g. - ``expire_time``) in the Backup resource should be - updated. This mask is relative to the Backup resource, - not to the request message. The field mask must always - be specified; this prevents any future fields from being - erased accidentally by clients that do not know about - them. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.types.Backup: - A backup of a Cloud Bigtable table. - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [backup, update_mask] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.UpdateBackupRequest): - request = bigtable_table_admin.UpdateBackupRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if backup is not None: - request.backup = backup - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.update_backup - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("backup.name", request.backup.name),) - ), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_backup( - self, - request: Optional[Union[bigtable_table_admin.DeleteBackupRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> None: - r"""Deletes a pending or completed Cloud Bigtable backup. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_delete_backup(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.DeleteBackupRequest( - name="name_value", - ) - - # Make the request - await client.delete_backup(request=request) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteBackupRequest, dict]]): - The request object. The request for - [DeleteBackup][google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup]. - name (:class:`str`): - Required. Name of the backup to delete. Values are of - the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [name] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.DeleteBackupRequest): - request = bigtable_table_admin.DeleteBackupRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.delete_backup - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - async def list_backups( - self, - request: Optional[Union[bigtable_table_admin.ListBackupsRequest, dict]] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> pagers.ListBackupsAsyncPager: - r"""Lists Cloud Bigtable backups. Returns both completed - and pending backups. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. 
- # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_list_backups(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.ListBackupsRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_backups(request=request) - - # Handle the response - async for response in page_result: - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListBackupsRequest, dict]]): - The request object. The request for - [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. - parent (:class:`str`): - Required. The cluster to list backups from. Values are - of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - Use ``{cluster} = '-'`` to list backups for all clusters - in an instance, e.g., - ``projects/{project}/instances/{instance}/clusters/-``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListBackupsAsyncPager: - The response for - [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [parent] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.ListBackupsRequest): - request = bigtable_table_admin.ListBackupsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.list_backups - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. 
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListBackupsAsyncPager( - method=rpc, - request=request, - response=response, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def _restore_table( - self, - request: Optional[Union[bigtable_table_admin.RestoreTableRequest, dict]] = None, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operation_async.AsyncOperation: - r"""Create a new table by restoring from a completed backup. The - returned table [long-running - operation][google.longrunning.Operation] can be used to track - the progress of the operation, and to cancel it. The - [metadata][google.longrunning.Operation.metadata] field type is - [RestoreTableMetadata][google.bigtable.admin.v2.RestoreTableMetadata]. - The [response][google.longrunning.Operation.response] type is - [Table][google.bigtable.admin.v2.Table], if successful. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_restore_table(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.RestoreTableRequest( - backup="backup_value", - parent="parent_value", - table_id="table_id_value", - ) - - # Make the request - operation = client._restore_table(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.RestoreTableRequest, dict]]): - The request object. The request for - [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table` A collection of user data indexed by row, column, and timestamp. - Each table is served using the resources of its - parent cluster. - - """ - # Create or coerce a protobuf request object. - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. 
- if not isinstance(request, bigtable_table_admin.RestoreTableRequest): - request = bigtable_table_admin.RestoreTableRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.restore_table - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - table.Table, - metadata_type=bigtable_table_admin.RestoreTableMetadata, - ) - - # Done; return the response. - return response - - async def copy_backup( - self, - request: Optional[Union[bigtable_table_admin.CopyBackupRequest, dict]] = None, - *, - parent: Optional[str] = None, - backup_id: Optional[str] = None, - source_backup: Optional[str] = None, - expire_time: Optional[timestamp_pb2.Timestamp] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operation_async.AsyncOperation: - r"""Copy a Cloud Bigtable backup to a new backup in the - destination cluster located in the destination instance - and project. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_copy_backup(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.CopyBackupRequest( - parent="parent_value", - backup_id="backup_id_value", - source_backup="source_backup_value", - ) - - # Make the request - operation = client.copy_backup(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.CopyBackupRequest, dict]]): - The request object. The request for - [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup]. - parent (:class:`str`): - Required. The name of the destination cluster that will - contain the backup copy. The cluster must already exist. - Values are of the form: - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - backup_id (:class:`str`): - Required. The id of the new backup. The ``backup_id`` - along with ``parent`` are combined as - {parent}/backups/{backup_id} to create the full backup - name, of the form: - ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``. 
- This string must be between 1 and 50 characters in - length and match the regex [_a-zA-Z0-9][-_.a-zA-Z0-9]*. - - This corresponds to the ``backup_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - source_backup (:class:`str`): - Required. The source backup to be copied from. The - source backup needs to be in READY state for it to be - copied. Copying a copied backup is not allowed. Once - CopyBackup is in progress, the source backup cannot be - deleted or cleaned up on expiration until CopyBackup is - finished. Values are of the form: - ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. - - This corresponds to the ``source_backup`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - expire_time (:class:`google.protobuf.timestamp_pb2.Timestamp`): - Required. The expiration time of the copied - backup with microsecond granularity that must be at - least 6 hours and at most 30 days from the time the - request is received. Once the ``expire_time`` has - passed, Cloud Bigtable will delete the backup and free - the resources used by the backup. - - This corresponds to the ``expire_time`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.bigtable_admin_v2.types.Backup` A - backup of a Cloud Bigtable table. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [parent, backup_id, source_backup, expire_time] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.CopyBackupRequest): - request = bigtable_table_admin.CopyBackupRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if backup_id is not None: - request.backup_id = backup_id - if source_backup is not None: - request.source_backup = source_backup - if expire_time is not None: - request.expire_time = expire_time - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.copy_backup - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), - ) - - # Validate the universe domain.
- self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - table.Backup, - metadata_type=bigtable_table_admin.CopyBackupMetadata, - ) - - # Done; return the response. - return response - - async def get_iam_policy( - self, - request: Optional[Union[iam_policy_pb2.GetIamPolicyRequest, dict]] = None, - *, - resource: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> policy_pb2.Policy: - r"""Gets the access control policy for a Bigtable - resource. Returns an empty policy if the resource exists - but does not have a policy set. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - from google.iam.v1 import iam_policy_pb2 # type: ignore - - async def sample_get_iam_policy(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = iam_policy_pb2.GetIamPolicyRequest( - resource="resource_value", - ) - - # Make the request - response = await client.get_iam_policy(request=request) - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]]): - The request object. Request message for ``GetIamPolicy`` method. - resource (:class:`str`): - REQUIRED: The resource for which the - policy is being requested. See the - operation documentation for the - appropriate value for this field. - - This corresponds to the ``resource`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.iam.v1.policy_pb2.Policy: - An Identity and Access Management (IAM) policy, which specifies access - controls for Google Cloud resources. - - A Policy is a collection of bindings. A binding binds - one or more members, or principals, to a single role. - Principals can be user accounts, service accounts, - Google groups, and domains (such as G Suite). A role - is a named list of permissions; each role can be an - IAM predefined role or a user-created custom role. - - For some types of Google Cloud resources, a binding - can also specify a condition, which is a logical - expression that allows access to a resource only if - the expression evaluates to true. A condition can add - constraints based on attributes of the request, the - resource, or both. 
To learn which resources support - conditions in their IAM policies, see the [IAM - documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). - - **JSON example:** - - :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` - - **YAML example:** - - :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` - - For a description of IAM and its features, see the - [IAM - documentation](\ https://cloud.google.com/iam/docs/). - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [resource] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. - if isinstance(request, dict): - request = iam_policy_pb2.GetIamPolicyRequest(**request) - elif not request: - request = iam_policy_pb2.GetIamPolicyRequest(resource=resource) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.get_iam_policy - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def set_iam_policy( - self, - request: Optional[Union[iam_policy_pb2.SetIamPolicyRequest, dict]] = None, - *, - resource: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> policy_pb2.Policy: - r"""Sets the access control policy on a Bigtable - resource. Replaces any existing policy. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - from google.iam.v1 import iam_policy_pb2 # type: ignore - - async def sample_set_iam_policy(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = iam_policy_pb2.SetIamPolicyRequest( - resource="resource_value", - ) - - # Make the request - response = await client.set_iam_policy(request=request) - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]]): - The request object. Request message for ``SetIamPolicy`` method. - resource (:class:`str`): - REQUIRED: The resource for which the - policy is being specified. See the - operation documentation for the - appropriate value for this field. - - This corresponds to the ``resource`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.iam.v1.policy_pb2.Policy: - An Identity and Access Management (IAM) policy, which specifies access - controls for Google Cloud resources. - - A Policy is a collection of bindings. A binding binds - one or more members, or principals, to a single role. - Principals can be user accounts, service accounts, - Google groups, and domains (such as G Suite). A role - is a named list of permissions; each role can be an - IAM predefined role or a user-created custom role. - - For some types of Google Cloud resources, a binding - can also specify a condition, which is a logical - expression that allows access to a resource only if - the expression evaluates to true. A condition can add - constraints based on attributes of the request, the - resource, or both. To learn which resources support - conditions in their IAM policies, see the [IAM - documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). 
- - **JSON example:** - - :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` - - **YAML example:** - - :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` - - For a description of IAM and its features, see the - [IAM - documentation](\ https://cloud.google.com/iam/docs/). - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [resource] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. - if isinstance(request, dict): - request = iam_policy_pb2.SetIamPolicyRequest(**request) - elif not request: - request = iam_policy_pb2.SetIamPolicyRequest(resource=resource) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.set_iam_policy - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def test_iam_permissions( - self, - request: Optional[Union[iam_policy_pb2.TestIamPermissionsRequest, dict]] = None, - *, - resource: Optional[str] = None, - permissions: Optional[MutableSequence[str]] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Returns permissions that the caller has on the - specified Bigtable resource. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - from google.iam.v1 import iam_policy_pb2 # type: ignore - - async def sample_test_iam_permissions(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = iam_policy_pb2.TestIamPermissionsRequest( - resource="resource_value", - permissions=['permissions_value1', 'permissions_value2'], - ) - - # Make the request - response = await client.test_iam_permissions(request=request) - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]]): - The request object. Request message for ``TestIamPermissions`` method. - resource (:class:`str`): - REQUIRED: The resource for which the - policy detail is being requested. See - the operation documentation for the - appropriate value for this field. - - This corresponds to the ``resource`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - permissions (:class:`MutableSequence[str]`): - The set of permissions to check for the ``resource``. - Permissions with wildcards (such as '*' or 'storage.*') - are not allowed. For more information see `IAM - Overview `__. - - This corresponds to the ``permissions`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse: - Response message for TestIamPermissions method. - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [resource, permissions] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. - if isinstance(request, dict): - request = iam_policy_pb2.TestIamPermissionsRequest(**request) - elif not request: - request = iam_policy_pb2.TestIamPermissionsRequest( - resource=resource, permissions=permissions - ) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.test_iam_permissions - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. 
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def _create_schema_bundle( - self, - request: Optional[ - Union[bigtable_table_admin.CreateSchemaBundleRequest, dict] - ] = None, - *, - parent: Optional[str] = None, - schema_bundle_id: Optional[str] = None, - schema_bundle: Optional[table.SchemaBundle] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operation_async.AsyncOperation: - r"""Creates a new schema bundle in the specified table. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_create_schema_bundle(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - schema_bundle = bigtable_admin_v2.SchemaBundle() - schema_bundle.proto_schema.proto_descriptors = b'proto_descriptors_blob' - - request = bigtable_admin_v2.CreateSchemaBundleRequest( - parent="parent_value", - schema_bundle_id="schema_bundle_id_value", - schema_bundle=schema_bundle, - ) - - # Make the request - operation = client._create_schema_bundle(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateSchemaBundleRequest, dict]]): - The request object. The request for - [CreateSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.CreateSchemaBundle]. - parent (:class:`str`): - Required. The parent resource where this schema bundle - will be created. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - schema_bundle_id (:class:`str`): - Required. The unique ID to use for - the schema bundle, which will become the - final component of the schema bundle's - resource name. - - This corresponds to the ``schema_bundle_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - schema_bundle (:class:`google.cloud.bigtable_admin_v2.types.SchemaBundle`): - Required. The schema bundle to - create. - - This corresponds to the ``schema_bundle`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. 
- - The result type for the operation will be - :class:`google.cloud.bigtable_admin_v2.types.SchemaBundle` - A named collection of related schemas. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [parent, schema_bundle_id, schema_bundle] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.CreateSchemaBundleRequest): - request = bigtable_table_admin.CreateSchemaBundleRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if schema_bundle_id is not None: - request.schema_bundle_id = schema_bundle_id - if schema_bundle is not None: - request.schema_bundle = schema_bundle - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.create_schema_bundle - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - table.SchemaBundle, - metadata_type=bigtable_table_admin.CreateSchemaBundleMetadata, - ) - - # Done; return the response. - return response - - async def _update_schema_bundle( - self, - request: Optional[ - Union[bigtable_table_admin.UpdateSchemaBundleRequest, dict] - ] = None, - *, - schema_bundle: Optional[table.SchemaBundle] = None, - update_mask: Optional[field_mask_pb2.FieldMask] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operation_async.AsyncOperation: - r"""Updates a schema bundle in the specified table. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_update_schema_bundle(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - schema_bundle = bigtable_admin_v2.SchemaBundle() - schema_bundle.proto_schema.proto_descriptors = b'proto_descriptors_blob' - - request = bigtable_admin_v2.UpdateSchemaBundleRequest( - schema_bundle=schema_bundle, - ) - - # Make the request - operation = client._update_schema_bundle(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.UpdateSchemaBundleRequest, dict]]): - The request object. The request for - [UpdateSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.UpdateSchemaBundle]. - schema_bundle (:class:`google.cloud.bigtable_admin_v2.types.SchemaBundle`): - Required. The schema bundle to update. - - The schema bundle's ``name`` field is used to identify - the schema bundle to update. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}`` - - This corresponds to the ``schema_bundle`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Optional. The list of fields to - update. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.bigtable_admin_v2.types.SchemaBundle` - A named collection of related schemas. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [schema_bundle, update_mask] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.UpdateSchemaBundleRequest): - request = bigtable_table_admin.UpdateSchemaBundleRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if schema_bundle is not None: - request.schema_bundle = schema_bundle - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.update_schema_bundle - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("schema_bundle.name", request.schema_bundle.name),) - ), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - table.SchemaBundle, - metadata_type=bigtable_table_admin.UpdateSchemaBundleMetadata, - ) - - # Done; return the response. - return response - - async def _get_schema_bundle( - self, - request: Optional[ - Union[bigtable_table_admin.GetSchemaBundleRequest, dict] - ] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> table.SchemaBundle: - r"""Gets metadata information about the specified schema - bundle. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_get_schema_bundle(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.GetSchemaBundleRequest( - name="name_value", - ) - - # Make the request - response = await client._get_schema_bundle(request=request) - - # Handle the response - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetSchemaBundleRequest, dict]]): - The request object. The request for - [GetSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.GetSchemaBundle]. - name (:class:`str`): - Required. The unique name of the schema bundle to - retrieve. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.types.SchemaBundle: - A named collection of related - schemas. - - """ - # Create or coerce a protobuf request object. 
- # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [name] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.GetSchemaBundleRequest): - request = bigtable_table_admin.GetSchemaBundleRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.get_schema_bundle - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def _list_schema_bundles( - self, - request: Optional[ - Union[bigtable_table_admin.ListSchemaBundlesRequest, dict] - ] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> pagers.ListSchemaBundlesAsyncPager: - r"""Lists all schema bundles associated with the - specified table. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_list_schema_bundles(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.ListSchemaBundlesRequest( - parent="parent_value", - ) - - # Make the request - page_result = client._list_schema_bundles(request=request) - - # Handle the response - async for response in page_result: - print(response) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListSchemaBundlesRequest, dict]]): - The request object. The request for - [ListSchemaBundles][google.bigtable.admin.v2.BigtableTableAdmin.ListSchemaBundles]. - parent (:class:`str`): - Required. The parent, which owns this collection of - schema bundles. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListSchemaBundlesAsyncPager: - The response for - [ListSchemaBundles][google.bigtable.admin.v2.BigtableTableAdmin.ListSchemaBundles]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [parent] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.ListSchemaBundlesRequest): - request = bigtable_table_admin.ListSchemaBundlesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.list_schema_bundles - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListSchemaBundlesAsyncPager( - method=rpc, - request=request, - response=response, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def _delete_schema_bundle( - self, - request: Optional[ - Union[bigtable_table_admin.DeleteSchemaBundleRequest, dict] - ] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> None: - r"""Deletes a schema bundle in the specified table. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - async def sample_delete_schema_bundle(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.DeleteSchemaBundleRequest( - name="name_value", - ) - - # Make the request - await client._delete_schema_bundle(request=request) - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteSchemaBundleRequest, dict]]): - The request object. The request for - [DeleteSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSchemaBundle]. - name (:class:`str`): - Required. The unique name of the schema bundle to - delete. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [name] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.DeleteSchemaBundleRequest): - request = bigtable_table_admin.DeleteSchemaBundleRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._client._transport._wrapped_methods[ - self._client._transport.delete_schema_bundle - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. 
- await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - async def __aenter__(self) -> "BaseBigtableTableAdminAsyncClient": - return self - - async def __aexit__(self, exc_type, exc, tb): - await self.transport.close() - - -DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=package_version.__version__ -) - -if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER - DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ - - -__all__ = ("BaseBigtableTableAdminAsyncClient",) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py deleted file mode 100644 index 41d32ef33..000000000 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ /dev/null @@ -1,5439 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from http import HTTPStatus -import json -import logging as std_logging -import os -import re -from typing import ( - Dict, - Callable, - Mapping, - MutableMapping, - MutableSequence, - Optional, - Sequence, - Tuple, - Type, - Union, - cast, -) -import warnings - -from google.cloud.bigtable_admin_v2 import gapic_version as package_version - -from google.api_core import client_options as client_options_lib -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore -import google.protobuf - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object, None] # type: ignore - -try: - from google.api_core import client_logging # type: ignore - - CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER -except ImportError: # pragma: NO COVER - CLIENT_LOGGING_SUPPORTED = False - -_LOGGER = std_logging.getLogger(__name__) - -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import pagers -from google.cloud.bigtable_admin_v2.types import bigtable_table_admin -from google.cloud.bigtable_admin_v2.types import table -from google.cloud.bigtable_admin_v2.types import table as gba_table -from google.cloud.bigtable_admin_v2.types import types -from google.iam.v1 import iam_policy_pb2 # type: ignore -from google.iam.v1 import policy_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore 
-from .transports.base import BigtableTableAdminTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import BigtableTableAdminGrpcTransport -from .transports.grpc_asyncio import BigtableTableAdminGrpcAsyncIOTransport -from .transports.rest import BigtableTableAdminRestTransport - - -class BaseBigtableTableAdminClientMeta(type): - """Metaclass for the BigtableTableAdmin client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. - """ - - _transport_registry = ( - OrderedDict() - ) # type: Dict[str, Type[BigtableTableAdminTransport]] - _transport_registry["grpc"] = BigtableTableAdminGrpcTransport - _transport_registry["grpc_asyncio"] = BigtableTableAdminGrpcAsyncIOTransport - _transport_registry["rest"] = BigtableTableAdminRestTransport - - def get_transport_class( - cls, - label: Optional[str] = None, - ) -> Type[BigtableTableAdminTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class BaseBigtableTableAdminClient(metaclass=BaseBigtableTableAdminClientMeta): - """Service for creating, configuring, and deleting Cloud - Bigtable tables. - - Provides access to the table schemas only, not the data stored - within the tables. - """ - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" - ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. - DEFAULT_ENDPOINT = "bigtableadmin.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - _DEFAULT_ENDPOINT_TEMPLATE = "bigtableadmin.{UNIVERSE_DOMAIN}" - _DEFAULT_UNIVERSE = "googleapis.com" - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - BaseBigtableTableAdminClient: The constructed client. 
- """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - BaseBigtableTableAdminClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file(filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> BigtableTableAdminTransport: - """Returns the transport used by the client instance. - - Returns: - BigtableTableAdminTransport: The transport used by the client - instance. - """ - return self._transport - - @staticmethod - def authorized_view_path( - project: str, - instance: str, - table: str, - authorized_view: str, - ) -> str: - """Returns a fully-qualified authorized_view string.""" - return "projects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}".format( - project=project, - instance=instance, - table=table, - authorized_view=authorized_view, - ) - - @staticmethod - def parse_authorized_view_path(path: str) -> Dict[str, str]: - """Parses a authorized_view path into its component segments.""" - m = re.match( - r"^projects/(?P.+?)/instances/(?P.+?)/tables/(?P
<table>.+?)/authorizedViews/(?P<authorized_view>.+?)$", - path, - ) - return m.groupdict() if m else {} - - @staticmethod - def backup_path( - project: str, - instance: str, - cluster: str, - backup: str, - ) -> str: - """Returns a fully-qualified backup string.""" - return "projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}".format( - project=project, - instance=instance, - cluster=cluster, - backup=backup, - ) - - @staticmethod - def parse_backup_path(path: str) -> Dict[str, str]: - """Parses a backup path into its component segments.""" - m = re.match( - r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)/clusters/(?P<cluster>.+?)/backups/(?P<backup>.+?)$", - path, - ) - return m.groupdict() if m else {} - - @staticmethod - def cluster_path( - project: str, - instance: str, - cluster: str, - ) -> str: - """Returns a fully-qualified cluster string.""" - return "projects/{project}/instances/{instance}/clusters/{cluster}".format( - project=project, - instance=instance, - cluster=cluster, - ) - - @staticmethod - def parse_cluster_path(path: str) -> Dict[str, str]: - """Parses a cluster path into its component segments.""" - m = re.match( - r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)/clusters/(?P<cluster>.+?)$", - path, - ) - return m.groupdict() if m else {} - - @staticmethod - def crypto_key_version_path( - project: str, - location: str, - key_ring: str, - crypto_key: str, - crypto_key_version: str, - ) -> str: - """Returns a fully-qualified crypto_key_version string.""" - return "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}/cryptoKeyVersions/{crypto_key_version}".format( - project=project, - location=location, - key_ring=key_ring, - crypto_key=crypto_key, - crypto_key_version=crypto_key_version, - ) - - @staticmethod - def parse_crypto_key_version_path(path: str) -> Dict[str, str]: - """Parses a crypto_key_version path into its component segments.""" - m = re.match( - r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/keyRings/(?P<key_ring>.+?)/cryptoKeys/(?P<crypto_key>.+?)/cryptoKeyVersions/(?P<crypto_key_version>.+?)$", - path, - ) - return m.groupdict() if m else {} - - @staticmethod - def instance_path( - project: str, - instance: str, - ) -> str: - """Returns a fully-qualified instance string.""" - return "projects/{project}/instances/{instance}".format( - project=project, - instance=instance, - ) - - @staticmethod - def parse_instance_path(path: str) -> Dict[str, str]: - """Parses an instance path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def schema_bundle_path( - project: str, - instance: str, - table: str, - schema_bundle: str, - ) -> str: - """Returns a fully-qualified schema_bundle string.""" - return "projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}".format( - project=project, - instance=instance, - table=table, - schema_bundle=schema_bundle, - ) - - @staticmethod - def parse_schema_bundle_path(path: str) -> Dict[str, str]: - """Parses a schema_bundle path into its component segments.""" - m = re.match( - r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)/tables/(?P<table>
.+?)/schemaBundles/(?P<schema_bundle>.+?)$", - path, - ) - return m.groupdict() if m else {} - - @staticmethod - def snapshot_path( - project: str, - instance: str, - cluster: str, - snapshot: str, - ) -> str: - """Returns a fully-qualified snapshot string.""" - return "projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}".format( - project=project, - instance=instance, - cluster=cluster, - snapshot=snapshot, - ) - - @staticmethod - def parse_snapshot_path(path: str) -> Dict[str, str]: - """Parses a snapshot path into its component segments.""" - m = re.match( - r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)/clusters/(?P<cluster>.+?)/snapshots/(?P<snapshot>.+?)$", - path, - ) - return m.groupdict() if m else {} - - @staticmethod - def table_path( - project: str, - instance: str, - table: str, - ) -> str: - """Returns a fully-qualified table string.""" - return "projects/{project}/instances/{instance}/tables/{table}".format( - project=project, - instance=instance, - table=table, - ) - - @staticmethod - def parse_table_path(path: str) -> Dict[str, str]: - """Parses a table path into its component segments.""" - m = re.match( - r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)/tables/(?P<table>
.+?)$", - path, - ) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path( - billing_account: str, - ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format( - billing_account=billing_account, - ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str, str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path( - folder: str, - ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format( - folder=folder, - ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str, str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path( - organization: str, - ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format( - organization=organization, - ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str, str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path( - project: str, - ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format( - project=project, - ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str, str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path( - project: str, - location: str, - ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format( - project=project, - location=location, - ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str, str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @classmethod - def get_mtls_endpoint_and_cert_source( - cls, client_options: Optional[client_options_lib.ClientOptions] = None - ): - """Deprecated. Return the API endpoint and client cert source for mutual TLS. - - The client cert source is determined in the following order: - (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the - client cert source is None. - (2) if `client_options.client_cert_source` is provided, use the provided one; if the - default client cert source exists, use the default one; otherwise the client cert - source is None. - - The API endpoint is determined in the following order: - (1) if `client_options.api_endpoint` if provided, use the provided one. - (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the - default mTLS endpoint; if the environment variable is "never", use the default API - endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise - use the default API endpoint. - - More details can be found at https://google.aip.dev/auth/4114. - - Args: - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. Only the `api_endpoint` and `client_cert_source` properties may be used - in this method. 
- - Returns: - Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the - client cert source to use. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If any errors happen. - """ - - warnings.warn( - "get_mtls_endpoint_and_cert_source is deprecated. Use the api_endpoint property instead.", - DeprecationWarning, - ) - if client_options is None: - client_options = client_options_lib.ClientOptions() - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") - use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) - if use_mtls_endpoint not in ("auto", "never", "always"): - raise MutualTLSChannelError( - "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" - ) - - # Figure out the client cert source to use. - client_cert_source = None - if use_client_cert == "true": - if client_options.client_cert_source: - client_cert_source = client_options.client_cert_source - elif mtls.has_default_client_cert_source(): - client_cert_source = mtls.default_client_cert_source() - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - elif use_mtls_endpoint == "always" or ( - use_mtls_endpoint == "auto" and client_cert_source - ): - api_endpoint = cls.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = cls.DEFAULT_ENDPOINT - - return api_endpoint, client_cert_source - - @staticmethod - def _read_environment_variables(): - """Returns the environment variables used by the client. - - Returns: - Tuple[bool, str, str]: returns the GOOGLE_API_USE_CLIENT_CERTIFICATE, - GOOGLE_API_USE_MTLS_ENDPOINT, and GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variables. - - Raises: - ValueError: If GOOGLE_API_USE_CLIENT_CERTIFICATE is not - any of ["true", "false"]. - google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT - is not any of ["auto", "never", "always"]. - """ - use_client_cert = os.getenv( - "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" - ).lower() - use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() - universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) - if use_mtls_endpoint not in ("auto", "never", "always"): - raise MutualTLSChannelError( - "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" - ) - return use_client_cert == "true", use_mtls_endpoint, universe_domain_env - - @staticmethod - def _get_client_cert_source(provided_cert_source, use_cert_flag): - """Return the client cert source to be used by the client. - - Args: - provided_cert_source (bytes): The client certificate source provided. - use_cert_flag (bool): A flag indicating whether to use the client certificate. - - Returns: - bytes or None: The client cert source to be used by the client. 
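The environment-variable handling documented above drives both the certificate source and the endpoint the client ends up using. The following is a minimal sketch of that behaviour, assuming the public ``BigtableTableAdminClient`` surface shown in the generated samples, application-default credentials, and that the mTLS endpoint is only chosen when ``GOOGLE_API_USE_MTLS_ENDPOINT`` and an available client certificate source call for it.

.. code-block:: python

    import os

    from google.cloud import bigtable_admin_v2

    # "auto" is the default: the mTLS endpoint is used only when a client
    # certificate source is found (provided explicitly or via the mtls defaults).
    os.environ["GOOGLE_API_USE_CLIENT_CERTIFICATE"] = "true"
    os.environ["GOOGLE_API_USE_MTLS_ENDPOINT"] = "auto"

    client = bigtable_admin_v2.BigtableTableAdminClient()

    # "bigtableadmin.mtls.googleapis.com" when a certificate source was found,
    # otherwise the regular "bigtableadmin.googleapis.com" endpoint.
    print(client.api_endpoint)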
- """ - client_cert_source = None - if use_cert_flag: - if provided_cert_source: - client_cert_source = provided_cert_source - elif mtls.has_default_client_cert_source(): - client_cert_source = mtls.default_client_cert_source() - return client_cert_source - - @staticmethod - def _get_api_endpoint( - api_override, client_cert_source, universe_domain, use_mtls_endpoint - ): - """Return the API endpoint used by the client. - - Args: - api_override (str): The API endpoint override. If specified, this is always - the return value of this function and the other arguments are not used. - client_cert_source (bytes): The client certificate source used by the client. - universe_domain (str): The universe domain used by the client. - use_mtls_endpoint (str): How to use the mTLS endpoint, which depends also on the other parameters. - Possible values are "always", "auto", or "never". - - Returns: - str: The API endpoint to be used by the client. - """ - if api_override is not None: - api_endpoint = api_override - elif use_mtls_endpoint == "always" or ( - use_mtls_endpoint == "auto" and client_cert_source - ): - _default_universe = BaseBigtableTableAdminClient._DEFAULT_UNIVERSE - if universe_domain != _default_universe: - raise MutualTLSChannelError( - f"mTLS is not supported in any universe other than {_default_universe}." - ) - api_endpoint = BaseBigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = ( - BaseBigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( - UNIVERSE_DOMAIN=universe_domain - ) - ) - return api_endpoint - - @staticmethod - def _get_universe_domain( - client_universe_domain: Optional[str], universe_domain_env: Optional[str] - ) -> str: - """Return the universe domain used by the client. - - Args: - client_universe_domain (Optional[str]): The universe domain configured via the client options. - universe_domain_env (Optional[str]): The universe domain configured via the "GOOGLE_CLOUD_UNIVERSE_DOMAIN" environment variable. - - Returns: - str: The universe domain to be used by the client. - - Raises: - ValueError: If the universe domain is an empty string. - """ - universe_domain = BaseBigtableTableAdminClient._DEFAULT_UNIVERSE - if client_universe_domain is not None: - universe_domain = client_universe_domain - elif universe_domain_env is not None: - universe_domain = universe_domain_env - if len(universe_domain.strip()) == 0: - raise ValueError("Universe Domain cannot be an empty string.") - return universe_domain - - def _validate_universe_domain(self): - """Validates client's and credentials' universe domains are consistent. - - Returns: - bool: True iff the configured universe domain is valid. - - Raises: - ValueError: If the configured universe domain is not valid. - """ - - # NOTE (b/349488459): universe validation is disabled until further notice. - return True - - def _add_cred_info_for_auth_errors( - self, error: core_exceptions.GoogleAPICallError - ) -> None: - """Adds credential info string to error details for 401/403/404 errors. - - Args: - error (google.api_core.exceptions.GoogleAPICallError): The error to add the cred info. 
- """ - if error.code not in [ - HTTPStatus.UNAUTHORIZED, - HTTPStatus.FORBIDDEN, - HTTPStatus.NOT_FOUND, - ]: - return - - cred = self._transport._credentials - - # get_cred_info is only available in google-auth>=2.35.0 - if not hasattr(cred, "get_cred_info"): - return - - # ignore the type check since pypy test fails when get_cred_info - # is not available - cred_info = cred.get_cred_info() # type: ignore - if cred_info and hasattr(error._details, "append"): - error._details.append(json.dumps(cred_info)) - - @property - def api_endpoint(self): - """Return the API endpoint used by the client instance. - - Returns: - str: The API endpoint used by the client instance. - """ - return self._api_endpoint - - @property - def universe_domain(self) -> str: - """Return the universe domain used by the client instance. - - Returns: - str: The universe domain used by the client instance. - """ - return self._universe_domain - - def __init__( - self, - *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Optional[ - Union[ - str, - BigtableTableAdminTransport, - Callable[..., BigtableTableAdminTransport], - ] - ] = None, - client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the base bigtable table admin client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Optional[Union[str,BigtableTableAdminTransport,Callable[..., BigtableTableAdminTransport]]]): - The transport to use, or a Callable that constructs and returns a new transport. - If a Callable is given, it will be called with the same set of initialization - arguments as used in the BigtableTableAdminTransport constructor. - If set to None, a transport is chosen automatically. - client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): - Custom options for the client. - - 1. The ``api_endpoint`` property can be used to override the - default endpoint provided by the client when ``transport`` is - not explicitly provided. Only if this property is not set and - ``transport`` was not explicitly provided, the endpoint is - determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment - variable, which have one of the following values: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto-switch to the - default mTLS endpoint if client certificate is present; this is - the default value). - - 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide a client certificate for mTLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - 3. The ``universe_domain`` property can be used to override the - default "googleapis.com" universe. Note that the ``api_endpoint`` - property still takes precedence; and ``universe_domain`` is - currently not supported for mTLS. - - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. 
If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client_options = client_options - if isinstance(self._client_options, dict): - self._client_options = client_options_lib.from_dict(self._client_options) - if self._client_options is None: - self._client_options = client_options_lib.ClientOptions() - self._client_options = cast( - client_options_lib.ClientOptions, self._client_options - ) - - universe_domain_opt = getattr(self._client_options, "universe_domain", None) - - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = BaseBigtableTableAdminClient._read_environment_variables() - self._client_cert_source = BaseBigtableTableAdminClient._get_client_cert_source( - self._client_options.client_cert_source, self._use_client_cert - ) - self._universe_domain = BaseBigtableTableAdminClient._get_universe_domain( - universe_domain_opt, self._universe_domain_env - ) - self._api_endpoint = None # updated below, depending on `transport` - - # Initialize the universe domain validation. - self._is_universe_domain_valid = False - - if CLIENT_LOGGING_SUPPORTED: # pragma: NO COVER - # Setup logging. - client_logging.initialize_logging() - - api_key_value = getattr(self._client_options, "api_key", None) - if api_key_value and credentials: - raise ValueError( - "client_options.api_key and credentials are mutually exclusive" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - transport_provided = isinstance(transport, BigtableTableAdminTransport) - if transport_provided: - # transport is a BigtableTableAdminTransport instance. - if credentials or self._client_options.credentials_file or api_key_value: - raise ValueError( - "When providing a transport instance, " - "provide its credentials directly." - ) - if self._client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." 
- ) - self._transport = cast(BigtableTableAdminTransport, transport) - self._api_endpoint = self._transport.host - - self._api_endpoint = ( - self._api_endpoint - or BaseBigtableTableAdminClient._get_api_endpoint( - self._client_options.api_endpoint, - self._client_cert_source, - self._universe_domain, - self._use_mtls_endpoint, - ) - ) - - if not transport_provided: - import google.auth._default # type: ignore - - if api_key_value and hasattr( - google.auth._default, "get_api_key_credentials" - ): - credentials = google.auth._default.get_api_key_credentials( - api_key_value - ) - - transport_init: Union[ - Type[BigtableTableAdminTransport], - Callable[..., BigtableTableAdminTransport], - ] = ( - BaseBigtableTableAdminClient.get_transport_class(transport) - if isinstance(transport, str) or transport is None - else cast(Callable[..., BigtableTableAdminTransport], transport) - ) - # initialize with the provided callable or the passed in class - self._transport = transport_init( - credentials=credentials, - credentials_file=self._client_options.credentials_file, - host=self._api_endpoint, - scopes=self._client_options.scopes, - client_cert_source_for_mtls=self._client_cert_source, - quota_project_id=self._client_options.quota_project_id, - client_info=client_info, - always_use_jwt_access=True, - api_audience=self._client_options.api_audience, - ) - - if "async" not in str(self._transport): - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - std_logging.DEBUG - ): # pragma: NO COVER - _LOGGER.debug( - "Created client `google.bigtable.admin_v2.BaseBigtableTableAdminClient`.", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "universeDomain": getattr( - self._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._transport, "_credentials") - else { - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "credentialsType": None, - }, - ) - - def create_table( - self, - request: Optional[Union[bigtable_table_admin.CreateTableRequest, dict]] = None, - *, - parent: Optional[str] = None, - table_id: Optional[str] = None, - table: Optional[gba_table.Table] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> gba_table.Table: - r"""Creates a new table in the specified instance. - The table can be created with a full set of initial - column families, specified in the request. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_create_table(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.CreateTableRequest( - parent="parent_value", - table_id="table_id_value", - ) - - # Make the request - response = client.create_table(request=request) - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.CreateTableRequest, dict]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] - parent (str): - Required. The unique name of the instance in which to - create the table. Values are of the form - ``projects/{project}/instances/{instance}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - table_id (str): - Required. The name by which the new table should be - referred to within the parent instance, e.g., ``foobar`` - rather than ``{parent}/tables/foobar``. Maximum 50 - characters. - - This corresponds to the ``table_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - table (google.cloud.bigtable_admin_v2.types.Table): - Required. The Table to create. - This corresponds to the ``table`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.types.Table: - A collection of user data indexed by - row, column, and timestamp. Each table - is served using the resources of its - parent cluster. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [parent, table_id, table] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.CreateTableRequest): - request = bigtable_table_admin.CreateTableRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if table_id is not None: - request.table_id = table_id - if table is not None: - request.table = table - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
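``create_table`` accepts either a fully populated request object or the flattened ``parent``/``table_id``/``table`` fields, but never both. A minimal sketch of the flattened form, assuming placeholder resource IDs and the ``Table`` type from ``google.cloud.bigtable_admin_v2.types``:

.. code-block:: python

    from google.cloud import bigtable_admin_v2
    from google.cloud.bigtable_admin_v2 import types

    client = bigtable_admin_v2.BigtableTableAdminClient()

    # Passing `request` together with these flattened fields raises ValueError,
    # as enforced by the quick check above.
    created = client.create_table(
        parent="projects/my-project/instances/my-instance",
        table_id="my-table",
        table=types.Table(),
    )
    print(created.name)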
- rpc = self._transport._wrapped_methods[self._transport.create_table] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def create_table_from_snapshot( - self, - request: Optional[ - Union[bigtable_table_admin.CreateTableFromSnapshotRequest, dict] - ] = None, - *, - parent: Optional[str] = None, - table_id: Optional[str] = None, - source_snapshot: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operation.Operation: - r"""Creates a new table from the specified snapshot. The - target table must not exist. The snapshot and the table - must be in the same instance. - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_create_table_from_snapshot(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.CreateTableFromSnapshotRequest( - parent="parent_value", - table_id="table_id_value", - source_snapshot="source_snapshot_value", - ) - - # Make the request - operation = client.create_table_from_snapshot(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.CreateTableFromSnapshotRequest, dict]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot] - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - parent (str): - Required. The unique name of the instance in which to - create the table. Values are of the form - ``projects/{project}/instances/{instance}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - table_id (str): - Required. The name by which the new table should be - referred to within the parent instance, e.g., ``foobar`` - rather than ``{parent}/tables/foobar``. 
- - This corresponds to the ``table_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - source_snapshot (str): - Required. The unique name of the snapshot from which to - restore the table. The snapshot and the table must be in - the same instance. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. - - This corresponds to the ``source_snapshot`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table` A collection of user data indexed by row, column, and timestamp. - Each table is served using the resources of its - parent cluster. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [parent, table_id, source_snapshot] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.CreateTableFromSnapshotRequest): - request = bigtable_table_admin.CreateTableFromSnapshotRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if table_id is not None: - request.table_id = table_id - if source_snapshot is not None: - request.source_snapshot = source_snapshot - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[ - self._transport.create_table_from_snapshot - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - table.Table, - metadata_type=bigtable_table_admin.CreateTableFromSnapshotMetadata, - ) - - # Done; return the response. 
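``create_table_from_snapshot`` returns a long-running operation rather than the table itself (and, per the docstring above, relies on a private-alpha snapshot feature). A minimal sketch of waiting on that operation with flattened arguments and placeholder resource names:

.. code-block:: python

    from google.cloud import bigtable_admin_v2

    client = bigtable_admin_v2.BigtableTableAdminClient()

    operation = client.create_table_from_snapshot(
        parent="projects/my-project/instances/my-instance",
        table_id="restored-table",
        source_snapshot=(
            "projects/my-project/instances/my-instance"
            "/clusters/my-cluster/snapshots/my-snapshot"
        ),
    )

    # Blocks until the LRO finishes; result() also accepts a timeout in seconds.
    table = operation.result(timeout=300)
    print(table.name)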
- return response - - def list_tables( - self, - request: Optional[Union[bigtable_table_admin.ListTablesRequest, dict]] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> pagers.ListTablesPager: - r"""Lists all tables served from a specified instance. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_list_tables(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.ListTablesRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_tables(request=request) - - # Handle the response - for response in page_result: - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.ListTablesRequest, dict]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] - parent (str): - Required. The unique name of the instance for which - tables should be listed. Values are of the form - ``projects/{project}/instances/{instance}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListTablesPager: - Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [parent] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.ListTablesRequest): - request = bigtable_table_admin.ListTablesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_tables] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListTablesPager( - method=rpc, - request=request, - response=response, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_table( - self, - request: Optional[Union[bigtable_table_admin.GetTableRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> table.Table: - r"""Gets metadata information about the specified table. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_get_table(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.GetTableRequest( - name="name_value", - ) - - # Make the request - response = client.get_table(request=request) - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.GetTableRequest, dict]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] - name (str): - Required. The unique name of the requested table. Values - are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.types.Table: - A collection of user data indexed by - row, column, and timestamp. Each table - is served using the resources of its - parent cluster. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
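Listing and fetching tables can be combined with the resource-path helpers defined earlier in this class; a short sketch with placeholder IDs:

.. code-block:: python

    from google.cloud import bigtable_admin_v2

    client = bigtable_admin_v2.BigtableTableAdminClient()

    # The pager resolves additional pages transparently while iterating.
    for t in client.list_tables(parent="projects/my-project/instances/my-instance"):
        print(t.name)

    # table_path()/parse_table_path() build and split the `name` values used above.
    name = client.table_path("my-project", "my-instance", "my-table")
    table = client.get_table(name=name)
    print(client.parse_table_path(table.name))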
- flattened_params = [name] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.GetTableRequest): - request = bigtable_table_admin.GetTableRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_table] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_table( - self, - request: Optional[Union[bigtable_table_admin.UpdateTableRequest, dict]] = None, - *, - table: Optional[gba_table.Table] = None, - update_mask: Optional[field_mask_pb2.FieldMask] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operation.Operation: - r"""Updates a specified table. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_update_table(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.UpdateTableRequest( - ) - - # Make the request - operation = client.update_table(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.UpdateTableRequest, dict]): - The request object. The request for - [UpdateTable][google.bigtable.admin.v2.BigtableTableAdmin.UpdateTable]. - table (google.cloud.bigtable_admin_v2.types.Table): - Required. The table to update. The table's ``name`` - field is used to identify the table to update. - - This corresponds to the ``table`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The list of fields to update. A mask - specifying which fields (e.g. ``change_stream_config``) - in the ``table`` field should be updated. This mask is - relative to the ``table`` field, not to the request - message. The wildcard (*) path is currently not - supported. 
Currently UpdateTable is only supported for - the following fields: - - - ``change_stream_config`` - - ``change_stream_config.retention_period`` - - ``deletion_protection`` - - ``row_key_schema`` - - If ``column_families`` is set in ``update_mask``, it - will return an UNIMPLEMENTED error. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table` A collection of user data indexed by row, column, and timestamp. - Each table is served using the resources of its - parent cluster. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [table, update_mask] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.UpdateTableRequest): - request = bigtable_table_admin.UpdateTableRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if table is not None: - request.table = table - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_table] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("table.name", request.table.name),) - ), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - gba_table.Table, - metadata_type=bigtable_table_admin.UpdateTableMetadata, - ) - - # Done; return the response. - return response - - def delete_table( - self, - request: Optional[Union[bigtable_table_admin.DeleteTableRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> None: - r"""Permanently deletes a specified table and all of its - data. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. 
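For the ``update_table`` method above, only the fields named in ``update_mask`` are changed. A minimal sketch using ``deletion_protection``, one of the supported paths listed earlier, with placeholder resource IDs:

.. code-block:: python

    from google.protobuf import field_mask_pb2

    from google.cloud import bigtable_admin_v2
    from google.cloud.bigtable_admin_v2 import types

    client = bigtable_admin_v2.BigtableTableAdminClient()

    operation = client.update_table(
        table=types.Table(
            name=client.table_path("my-project", "my-instance", "my-table"),
            deletion_protection=True,
        ),
        update_mask=field_mask_pb2.FieldMask(paths=["deletion_protection"]),
    )
    updated = operation.result()  # waits for the long-running update to finish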
- # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_delete_table(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.DeleteTableRequest( - name="name_value", - ) - - # Make the request - client.delete_table(request=request) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.DeleteTableRequest, dict]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] - name (str): - Required. The unique name of the table to be deleted. - Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [name] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.DeleteTableRequest): - request = bigtable_table_admin.DeleteTableRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_table] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def undelete_table( - self, - request: Optional[ - Union[bigtable_table_admin.UndeleteTableRequest, dict] - ] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operation.Operation: - r"""Restores a specified table which was accidentally - deleted. - - .. 
code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_undelete_table(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.UndeleteTableRequest( - name="name_value", - ) - - # Make the request - operation = client.undelete_table(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.UndeleteTableRequest, dict]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable] - name (str): - Required. The unique name of the table to be restored. - Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table` A collection of user data indexed by row, column, and timestamp. - Each table is served using the resources of its - parent cluster. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [name] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.UndeleteTableRequest): - request = bigtable_table_admin.UndeleteTableRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.undelete_table] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. 
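# --- Editor's illustrative sketch (not part of the generated diff) ---
# The table methods above accept either a fully-formed request object or the
# "flattened" keyword arguments, but never both; passing both raises
# ValueError, per the flattened_params check in the deleted code. A minimal
# usage sketch, assuming a hypothetical table name:
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient()
table_name = "projects/my-project/instances/my-instance/tables/my-table"  # hypothetical

# Option 1: flattened argument only.
operation = client.undelete_table(name=table_name)
restored_table = operation.result()  # blocks until the long-running operation finishes

# Option 2: explicit request object only.
request = bigtable_admin_v2.UndeleteTableRequest(name=table_name)
operation = client.undelete_table(request=request)

# Supplying both `request` and `name` in one call would raise ValueError.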
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - table.Table, - metadata_type=bigtable_table_admin.UndeleteTableMetadata, - ) - - # Done; return the response. - return response - - def create_authorized_view( - self, - request: Optional[ - Union[bigtable_table_admin.CreateAuthorizedViewRequest, dict] - ] = None, - *, - parent: Optional[str] = None, - authorized_view: Optional[table.AuthorizedView] = None, - authorized_view_id: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operation.Operation: - r"""Creates a new AuthorizedView in a table. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_create_authorized_view(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.CreateAuthorizedViewRequest( - parent="parent_value", - authorized_view_id="authorized_view_id_value", - ) - - # Make the request - operation = client.create_authorized_view(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.CreateAuthorizedViewRequest, dict]): - The request object. The request for - [CreateAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.CreateAuthorizedView] - parent (str): - Required. This is the name of the table the - AuthorizedView belongs to. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - authorized_view (google.cloud.bigtable_admin_v2.types.AuthorizedView): - Required. The AuthorizedView to - create. - - This corresponds to the ``authorized_view`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - authorized_view_id (str): - Required. The id of the AuthorizedView to create. This - AuthorizedView must not already exist. The - ``authorized_view_id`` appended to ``parent`` forms the - full AuthorizedView name of the form - ``projects/{project}/instances/{instance}/tables/{table}/authorizedView/{authorized_view}``. - - This corresponds to the ``authorized_view_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. 
- - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.AuthorizedView` AuthorizedViews represent subsets of a particular Cloud Bigtable table. Users - can configure access to each Authorized View - independently from the table and use the existing - Data APIs to access the subset of data. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [parent, authorized_view, authorized_view_id] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.CreateAuthorizedViewRequest): - request = bigtable_table_admin.CreateAuthorizedViewRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if authorized_view is not None: - request.authorized_view = authorized_view - if authorized_view_id is not None: - request.authorized_view_id = authorized_view_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_authorized_view] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - table.AuthorizedView, - metadata_type=bigtable_table_admin.CreateAuthorizedViewMetadata, - ) - - # Done; return the response. - return response - - def list_authorized_views( - self, - request: Optional[ - Union[bigtable_table_admin.ListAuthorizedViewsRequest, dict] - ] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> pagers.ListAuthorizedViewsPager: - r"""Lists all AuthorizedViews from a specific table. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_list_authorized_views(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.ListAuthorizedViewsRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_authorized_views(request=request) - - # Handle the response - for response in page_result: - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.ListAuthorizedViewsRequest, dict]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews][google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews] - parent (str): - Required. The unique name of the table for which - AuthorizedViews should be listed. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListAuthorizedViewsPager: - Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews][google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [parent] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.ListAuthorizedViewsRequest): - request = bigtable_table_admin.ListAuthorizedViewsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_authorized_views] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. 
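# --- Editor's illustrative sketch (not part of the generated diff) ---
# list_authorized_views returns a pager; iterating over it yields
# AuthorizedView results and resolves additional pages automatically, as the
# docstring above notes. Assumes a hypothetical table name.
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient()
parent = "projects/my-project/instances/my-instance/tables/my-table"  # hypothetical

for authorized_view in client.list_authorized_views(parent=parent):
    print(authorized_view.name)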
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListAuthorizedViewsPager( - method=rpc, - request=request, - response=response, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_authorized_view( - self, - request: Optional[ - Union[bigtable_table_admin.GetAuthorizedViewRequest, dict] - ] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> table.AuthorizedView: - r"""Gets information from a specified AuthorizedView. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_get_authorized_view(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.GetAuthorizedViewRequest( - name="name_value", - ) - - # Make the request - response = client.get_authorized_view(request=request) - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.GetAuthorizedViewRequest, dict]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.GetAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.GetAuthorizedView] - name (str): - Required. The unique name of the requested - AuthorizedView. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.types.AuthorizedView: - AuthorizedViews represent subsets of - a particular Cloud Bigtable table. Users - can configure access to each Authorized - View independently from the table and - use the existing Data APIs to access the - subset of data. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [name] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." 
- ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.GetAuthorizedViewRequest): - request = bigtable_table_admin.GetAuthorizedViewRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_authorized_view] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_authorized_view( - self, - request: Optional[ - Union[bigtable_table_admin.UpdateAuthorizedViewRequest, dict] - ] = None, - *, - authorized_view: Optional[table.AuthorizedView] = None, - update_mask: Optional[field_mask_pb2.FieldMask] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operation.Operation: - r"""Updates an AuthorizedView in a table. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_update_authorized_view(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.UpdateAuthorizedViewRequest( - ) - - # Make the request - operation = client.update_authorized_view(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.UpdateAuthorizedViewRequest, dict]): - The request object. The request for - [UpdateAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.UpdateAuthorizedView]. - authorized_view (google.cloud.bigtable_admin_v2.types.AuthorizedView): - Required. The AuthorizedView to update. The ``name`` in - ``authorized_view`` is used to identify the - AuthorizedView. AuthorizedView name must in this format: - ``projects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}``. - - This corresponds to the ``authorized_view`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Optional. The list of fields to update. A mask - specifying which fields in the AuthorizedView resource - should be updated. This mask is relative to the - AuthorizedView resource, not to the request message. A - field will be overwritten if it is in the mask. If - empty, all fields set in the request will be - overwritten. 
A special value ``*`` means to overwrite - all fields (including fields not set in the request). - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.AuthorizedView` AuthorizedViews represent subsets of a particular Cloud Bigtable table. Users - can configure access to each Authorized View - independently from the table and use the existing - Data APIs to access the subset of data. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [authorized_view, update_mask] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.UpdateAuthorizedViewRequest): - request = bigtable_table_admin.UpdateAuthorizedViewRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if authorized_view is not None: - request.authorized_view = authorized_view - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_authorized_view] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("authorized_view.name", request.authorized_view.name),) - ), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - table.AuthorizedView, - metadata_type=bigtable_table_admin.UpdateAuthorizedViewMetadata, - ) - - # Done; return the response. - return response - - def delete_authorized_view( - self, - request: Optional[ - Union[bigtable_table_admin.DeleteAuthorizedViewRequest, dict] - ] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> None: - r"""Permanently deletes a specified AuthorizedView. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. 
- # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_delete_authorized_view(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.DeleteAuthorizedViewRequest( - name="name_value", - ) - - # Make the request - client.delete_authorized_view(request=request) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.DeleteAuthorizedViewRequest, dict]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.DeleteAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.DeleteAuthorizedView] - name (str): - Required. The unique name of the AuthorizedView to be - deleted. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [name] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.DeleteAuthorizedViewRequest): - request = bigtable_table_admin.DeleteAuthorizedViewRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_authorized_view] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. 
- rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def _modify_column_families( - self, - request: Optional[ - Union[bigtable_table_admin.ModifyColumnFamiliesRequest, dict] - ] = None, - *, - name: Optional[str] = None, - modifications: Optional[ - MutableSequence[ - bigtable_table_admin.ModifyColumnFamiliesRequest.Modification - ] - ] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> table.Table: - r"""Performs a series of column family modifications on - the specified table. Either all or none of the - modifications will occur before this method returns, but - data requests received prior to that point may see a - table where only some modifications have taken effect. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_modify_column_families(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.ModifyColumnFamiliesRequest( - name="name_value", - ) - - # Make the request - response = client._modify_column_families(request=request) - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest, dict]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] - name (str): - Required. The unique name of the table whose families - should be modified. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - modifications (MutableSequence[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest.Modification]): - Required. Modifications to be - atomically applied to the specified - table's families. Entries are applied in - order, meaning that earlier - modifications can be masked by later - ones (in the case of repeated updates to - the same family, for example). - - This corresponds to the ``modifications`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.types.Table: - A collection of user data indexed by - row, column, and timestamp. Each table - is served using the resources of its - parent cluster. - - """ - # Create or coerce a protobuf request object. 
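# --- Editor's illustrative sketch (not part of the generated diff) ---
# Column-family modifications are applied atomically and in order, so a later
# entry can override an earlier one for the same family. A minimal sketch,
# assuming a hypothetical table name and mirroring the private method name
# used by this client surface:
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient()
Modification = bigtable_admin_v2.ModifyColumnFamiliesRequest.Modification

request = bigtable_admin_v2.ModifyColumnFamiliesRequest(
    name="projects/my-project/instances/my-instance/tables/my-table",  # hypothetical
    modifications=[
        # Create a new family that keeps at most two cell versions.
        Modification(
            id="stats",
            create=bigtable_admin_v2.ColumnFamily(
                gc_rule=bigtable_admin_v2.GcRule(max_num_versions=2)
            ),
        ),
        # Drop an existing family in the same atomic call.
        Modification(id="obsolete", drop=True),
    ],
)
table = client._modify_column_families(request=request)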
- # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [name, modifications] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.ModifyColumnFamiliesRequest): - request = bigtable_table_admin.ModifyColumnFamiliesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if modifications is not None: - request.modifications = modifications - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.modify_column_families] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def drop_row_range( - self, - request: Optional[Union[bigtable_table_admin.DropRowRangeRequest, dict]] = None, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> None: - r"""Permanently drop/delete a row range from a specified - table. The request can specify whether to delete all - rows in a table, or only those that match a particular - prefix. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_drop_row_range(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.DropRowRangeRequest( - row_key_prefix=b'row_key_prefix_blob', - name="name_value", - ) - - # Make the request - client.drop_row_range(request=request) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.DropRowRangeRequest, dict]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. 
- """ - # Create or coerce a protobuf request object. - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.DropRowRangeRequest): - request = bigtable_table_admin.DropRowRangeRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.drop_row_range] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def generate_consistency_token( - self, - request: Optional[ - Union[bigtable_table_admin.GenerateConsistencyTokenRequest, dict] - ] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> bigtable_table_admin.GenerateConsistencyTokenResponse: - r"""Generates a consistency token for a Table, which can - be used in CheckConsistency to check whether mutations - to the table that finished before this call started have - been replicated. The tokens will be available for 90 - days. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_generate_consistency_token(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.GenerateConsistencyTokenRequest( - name="name_value", - ) - - # Make the request - response = client.generate_consistency_token(request=request) - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenRequest, dict]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] - name (str): - Required. The unique name of the Table for which to - create a consistency token. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. 
- - Returns: - google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenResponse: - Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [name] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance( - request, bigtable_table_admin.GenerateConsistencyTokenRequest - ): - request = bigtable_table_admin.GenerateConsistencyTokenRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[ - self._transport.generate_consistency_token - ] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def check_consistency( - self, - request: Optional[ - Union[bigtable_table_admin.CheckConsistencyRequest, dict] - ] = None, - *, - name: Optional[str] = None, - consistency_token: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> bigtable_table_admin.CheckConsistencyResponse: - r"""Checks replication consistency based on a consistency - token, that is, if replication has caught up based on - the conditions specified in the token and the check - request. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_check_consistency(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.CheckConsistencyRequest( - name="name_value", - consistency_token="consistency_token_value", - ) - - # Make the request - response = client.check_consistency(request=request) - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.CheckConsistencyRequest, dict]): - The request object. 
Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] - name (str): - Required. The unique name of the Table for which to - check replication consistency. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - consistency_token (str): - Required. The token created using - GenerateConsistencyToken for the Table. - - This corresponds to the ``consistency_token`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.types.CheckConsistencyResponse: - Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [name, consistency_token] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.CheckConsistencyRequest): - request = bigtable_table_admin.CheckConsistencyRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if consistency_token is not None: - request.consistency_token = consistency_token - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.check_consistency] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def _snapshot_table( - self, - request: Optional[ - Union[bigtable_table_admin.SnapshotTableRequest, dict] - ] = None, - *, - name: Optional[str] = None, - cluster: Optional[str] = None, - snapshot_id: Optional[str] = None, - description: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operation.Operation: - r"""Creates a new snapshot in the specified cluster from - the specified source table. 
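# --- Editor's illustrative sketch (not part of the generated diff) ---
# Typical replication check with the two methods above: generate a token,
# then poll check_consistency until mutations issued before the token was
# created have been replicated. Tokens remain valid for 90 days. Assumes a
# hypothetical table name.
import time

from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient()
table_name = "projects/my-project/instances/my-instance/tables/my-table"  # hypothetical

token = client.generate_consistency_token(name=table_name).consistency_token

while not client.check_consistency(
    name=table_name, consistency_token=token
).consistent:
    time.sleep(10)  # back off between polls
print("Replication has caught up for mutations issued before the token.")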
The cluster and the table - must be in the same instance. - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_snapshot_table(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.SnapshotTableRequest( - name="name_value", - cluster="cluster_value", - snapshot_id="snapshot_id_value", - ) - - # Make the request - operation = client._snapshot_table(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.SnapshotTableRequest, dict]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable] - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - name (str): - Required. The unique name of the table to have the - snapshot taken. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster (str): - Required. The name of the cluster where the snapshot - will be created in. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - - This corresponds to the ``cluster`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - snapshot_id (str): - Required. The ID by which the new snapshot should be - referred to within the parent cluster, e.g., - ``mysnapshot`` of the form: - ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` rather than - ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/mysnapshot``. - - This corresponds to the ``snapshot_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - description (str): - Description of the snapshot. - This corresponds to the ``description`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. 
- - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Snapshot` A snapshot of a table at a particular time. A snapshot can be used as a - checkpoint for data restoration or a data source for - a new table. - - Note: This is a private alpha release of Cloud - Bigtable snapshots. This feature is not currently - available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible - ways and is not recommended for production use. It is - not subject to any SLA or deprecation policy. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [name, cluster, snapshot_id, description] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.SnapshotTableRequest): - request = bigtable_table_admin.SnapshotTableRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if cluster is not None: - request.cluster = cluster - if snapshot_id is not None: - request.snapshot_id = snapshot_id - if description is not None: - request.description = description - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.snapshot_table] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - table.Snapshot, - metadata_type=bigtable_table_admin.SnapshotTableMetadata, - ) - - # Done; return the response. - return response - - def _get_snapshot( - self, - request: Optional[Union[bigtable_table_admin.GetSnapshotRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> table.Snapshot: - r"""Gets metadata information about the specified - snapshot. - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_get_snapshot(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.GetSnapshotRequest( - name="name_value", - ) - - # Make the request - response = client._get_snapshot(request=request) - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.GetSnapshotRequest, dict]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - name (str): - Required. The unique name of the requested snapshot. - Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.types.Snapshot: - A snapshot of a table at a particular - time. A snapshot can be used as a - checkpoint for data restoration or a - data source for a new table. - - Note: This is a private alpha release of - Cloud Bigtable snapshots. This feature - is not currently available to most Cloud - Bigtable customers. This feature might - be changed in backward-incompatible ways - and is not recommended for production - use. It is not subject to any SLA or - deprecation policy. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [name] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.GetSnapshotRequest): - request = bigtable_table_admin.GetSnapshotRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_snapshot] - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def _list_snapshots( - self, - request: Optional[ - Union[bigtable_table_admin.ListSnapshotsRequest, dict] - ] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> pagers.ListSnapshotsPager: - r"""Lists all snapshots associated with the specified - cluster. - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_list_snapshots(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.ListSnapshotsRequest( - parent="parent_value", - ) - - # Make the request - page_result = client._list_snapshots(request=request) - - # Handle the response - for response in page_result: - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest, dict]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - parent (str): - Required. The unique name of the cluster for which - snapshots should be listed. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - Use ``{cluster} = '-'`` to list snapshots for all - clusters in an instance, e.g., - ``projects/{project}/instances/{instance}/clusters/-``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. 
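# Sketch of the `metadata` argument described above: caller-supplied key/value
# pairs travel as gRPC metadata (bytes are required for keys ending in "-bin"),
# and the client appends its own ("x-goog-request-params", ...) pair built with
# gapic_v1.routing_header.to_grpc_metadata.  The custom header names and the
# cluster path are placeholder assumptions.
from google.api_core import gapic_v1
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient()
parent = "projects/my-project/instances/my-instance/clusters/my-cluster"

# The routing pair the client derives from the request's `parent` field.
print(gapic_v1.routing_header.to_grpc_metadata((("parent", parent),)))

# Caller metadata rides along with the call; note the bytes value for "-bin".
for snapshot in client._list_snapshots(
    parent=parent,
    metadata=(
        ("x-example-trace", "trace-12345"),
        ("x-example-payload-bin", b"\x0a\x02\x08\x01"),
    ),
):
    print(snapshot.name)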
- - Returns: - google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListSnapshotsPager: - Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] - - Note: This is a private alpha release of Cloud - Bigtable snapshots. This feature is not currently - available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible - ways and is not recommended for production use. It is - not subject to any SLA or deprecation policy. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [parent] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.ListSnapshotsRequest): - request = bigtable_table_admin.ListSnapshotsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_snapshots] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListSnapshotsPager( - method=rpc, - request=request, - response=response, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def _delete_snapshot( - self, - request: Optional[ - Union[bigtable_table_admin.DeleteSnapshotRequest, dict] - ] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> None: - r"""Permanently deletes the specified snapshot. - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_delete_snapshot(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.DeleteSnapshotRequest( - name="name_value", - ) - - # Make the request - client._delete_snapshot(request=request) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.DeleteSnapshotRequest, dict]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - name (str): - Required. The unique name of the snapshot to be deleted. - Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [name] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.DeleteSnapshotRequest): - request = bigtable_table_admin.DeleteSnapshotRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_snapshot] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. 
- rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def create_backup( - self, - request: Optional[Union[bigtable_table_admin.CreateBackupRequest, dict]] = None, - *, - parent: Optional[str] = None, - backup_id: Optional[str] = None, - backup: Optional[table.Backup] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operation.Operation: - r"""Starts creating a new Cloud Bigtable Backup. The returned backup - [long-running operation][google.longrunning.Operation] can be - used to track creation of the backup. The - [metadata][google.longrunning.Operation.metadata] field type is - [CreateBackupMetadata][google.bigtable.admin.v2.CreateBackupMetadata]. - The [response][google.longrunning.Operation.response] field type - is [Backup][google.bigtable.admin.v2.Backup], if successful. - Cancelling the returned operation will stop the creation and - delete the backup. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_create_backup(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - backup = bigtable_admin_v2.Backup() - backup.source_table = "source_table_value" - - request = bigtable_admin_v2.CreateBackupRequest( - parent="parent_value", - backup_id="backup_id_value", - backup=backup, - ) - - # Make the request - operation = client.create_backup(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.CreateBackupRequest, dict]): - The request object. The request for - [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]. - parent (str): - Required. This must be one of the clusters in the - instance in which this table is located. The backup will - be stored in this cluster. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - backup_id (str): - Required. The id of the backup to be created. The - ``backup_id`` along with the parent ``parent`` are - combined as {parent}/backups/{backup_id} to create the - full backup name, of the form: - ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``. - This string must be between 1 and 50 characters in - length and match the regex [_a-zA-Z0-9][-_.a-zA-Z0-9]*. - - This corresponds to the ``backup_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - backup (google.cloud.bigtable_admin_v2.types.Backup): - Required. The backup to create. - This corresponds to the ``backup`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.bigtable_admin_v2.types.Backup` A - backup of a Cloud Bigtable table. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [parent, backup_id, backup] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.CreateBackupRequest): - request = bigtable_table_admin.CreateBackupRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if backup_id is not None: - request.backup_id = backup_id - if backup is not None: - request.backup = backup - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_backup] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - table.Backup, - metadata_type=bigtable_table_admin.CreateBackupMetadata, - ) - - # Done; return the response. - return response - - def get_backup( - self, - request: Optional[Union[bigtable_table_admin.GetBackupRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> table.Backup: - r"""Gets metadata on a pending or completed Cloud - Bigtable Backup. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
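# A minimal sketch of driving the CreateBackup long-running operation wrapped
# above.  The resource names, backup id, and the 7-day expiration are
# placeholder assumptions; expire_time must be set when creating a backup.
import datetime

from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient()

backup = bigtable_admin_v2.Backup(
    source_table="projects/my-project/instances/my-instance/tables/my-table",
)
# proto-plus accepts a datetime for the Timestamp-typed expire_time field.
backup.expire_time = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(days=7)

operation = client.create_backup(
    parent="projects/my-project/instances/my-instance/clusters/my-cluster",
    backup_id="my-backup",
    backup=backup,
)
print(operation.metadata)        # CreateBackupMetadata while in progress
completed = operation.result()   # blocks until done and returns a Backup
print(completed.name, completed.state)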
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_get_backup(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.GetBackupRequest( - name="name_value", - ) - - # Make the request - response = client.get_backup(request=request) - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.GetBackupRequest, dict]): - The request object. The request for - [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. - name (str): - Required. Name of the backup. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.types.Backup: - A backup of a Cloud Bigtable table. - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [name] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.GetBackupRequest): - request = bigtable_table_admin.GetBackupRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_backup] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_backup( - self, - request: Optional[Union[bigtable_table_admin.UpdateBackupRequest, dict]] = None, - *, - backup: Optional[table.Backup] = None, - update_mask: Optional[field_mask_pb2.FieldMask] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> table.Backup: - r"""Updates a pending or completed Cloud Bigtable Backup. - - .. 
code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_update_backup(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - backup = bigtable_admin_v2.Backup() - backup.source_table = "source_table_value" - - request = bigtable_admin_v2.UpdateBackupRequest( - backup=backup, - ) - - # Make the request - response = client.update_backup(request=request) - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.UpdateBackupRequest, dict]): - The request object. The request for - [UpdateBackup][google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup]. - backup (google.cloud.bigtable_admin_v2.types.Backup): - Required. The backup to update. ``backup.name``, and the - fields to be updated as specified by ``update_mask`` are - required. Other fields are ignored. Update is only - supported for the following fields: - - - ``backup.expire_time``. - - This corresponds to the ``backup`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. A mask specifying which fields (e.g. - ``expire_time``) in the Backup resource should be - updated. This mask is relative to the Backup resource, - not to the request message. The field mask must always - be specified; this prevents any future fields from being - erased accidentally by clients that do not know about - them. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.types.Backup: - A backup of a Cloud Bigtable table. - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [backup, update_mask] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.UpdateBackupRequest): - request = bigtable_table_admin.UpdateBackupRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if backup is not None: - request.backup = backup - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_backup] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("backup.name", request.backup.name),) - ), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_backup( - self, - request: Optional[Union[bigtable_table_admin.DeleteBackupRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> None: - r"""Deletes a pending or completed Cloud Bigtable backup. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_delete_backup(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.DeleteBackupRequest( - name="name_value", - ) - - # Make the request - client.delete_backup(request=request) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.DeleteBackupRequest, dict]): - The request object. The request for - [DeleteBackup][google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup]. - name (str): - Required. Name of the backup to delete. Values are of - the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [name] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. 
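# Sketch of the one mutation documented for update_backup above: extending
# expire_time with a FieldMask limited to that field.  The backup name and the
# new 14-day expiration are placeholder assumptions.
import datetime

from google.cloud import bigtable_admin_v2
from google.protobuf import field_mask_pb2

client = bigtable_admin_v2.BigtableTableAdminClient()

backup = bigtable_admin_v2.Backup(
    name="projects/my-project/instances/my-instance/clusters/my-cluster/backups/my-backup",
)
backup.expire_time = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(days=14)

updated = client.update_backup(
    backup=backup,
    update_mask=field_mask_pb2.FieldMask(paths=["expire_time"]),
)
print(updated.expire_time)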
- if not isinstance(request, bigtable_table_admin.DeleteBackupRequest): - request = bigtable_table_admin.DeleteBackupRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_backup] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def list_backups( - self, - request: Optional[Union[bigtable_table_admin.ListBackupsRequest, dict]] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> pagers.ListBackupsPager: - r"""Lists Cloud Bigtable backups. Returns both completed - and pending backups. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_list_backups(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.ListBackupsRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_backups(request=request) - - # Handle the response - for response in page_result: - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.ListBackupsRequest, dict]): - The request object. The request for - [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. - parent (str): - Required. The cluster to list backups from. Values are - of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - Use ``{cluster} = '-'`` to list backups for all clusters - in an instance, e.g., - ``projects/{project}/instances/{instance}/clusters/-``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListBackupsPager: - The response for - [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. 
- # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [parent] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.ListBackupsRequest): - request = bigtable_table_admin.ListBackupsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_backups] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListBackupsPager( - method=rpc, - request=request, - response=response, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def _restore_table( - self, - request: Optional[Union[bigtable_table_admin.RestoreTableRequest, dict]] = None, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operation.Operation: - r"""Create a new table by restoring from a completed backup. The - returned table [long-running - operation][google.longrunning.Operation] can be used to track - the progress of the operation, and to cancel it. The - [metadata][google.longrunning.Operation.metadata] field type is - [RestoreTableMetadata][google.bigtable.admin.v2.RestoreTableMetadata]. - The [response][google.longrunning.Operation.response] type is - [Table][google.bigtable.admin.v2.Table], if successful. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
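# Sketch of consuming the ListBackupsPager returned above: iterating the pager
# yields Backup messages across pages, while .pages yields one response per
# page.  The project and instance ids are placeholders; the '-' cluster
# wildcard follows the docstring above.
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient()
parent = "projects/my-project/instances/my-instance/clusters/-"

for backup in client.list_backups(parent=parent):
    print(backup.name, backup.state)

for page in client.list_backups(parent=parent).pages:
    print(len(page.backups), "backups on this page")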
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_restore_table(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.RestoreTableRequest( - backup="backup_value", - parent="parent_value", - table_id="table_id_value", - ) - - # Make the request - operation = client._restore_table(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.RestoreTableRequest, dict]): - The request object. The request for - [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table` A collection of user data indexed by row, column, and timestamp. - Each table is served using the resources of its - parent cluster. - - """ - # Create or coerce a protobuf request object. - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.RestoreTableRequest): - request = bigtable_table_admin.RestoreTableRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.restore_table] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - table.Table, - metadata_type=bigtable_table_admin.RestoreTableMetadata, - ) - - # Done; return the response. - return response - - def copy_backup( - self, - request: Optional[Union[bigtable_table_admin.CopyBackupRequest, dict]] = None, - *, - parent: Optional[str] = None, - backup_id: Optional[str] = None, - source_backup: Optional[str] = None, - expire_time: Optional[timestamp_pb2.Timestamp] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operation.Operation: - r"""Copy a Cloud Bigtable backup to a new backup in the - destination cluster located in the destination instance - and project. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. 
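# Sketch of the RestoreTable long-running operation documented above; it takes
# only a request object (no flattened fields).  All resource names and the new
# table id are placeholder assumptions.
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient()

request = bigtable_admin_v2.RestoreTableRequest(
    parent="projects/my-project/instances/my-instance",
    table_id="restored-table",
    backup="projects/my-project/instances/my-instance/clusters/my-cluster/backups/my-backup",
)
operation = client._restore_table(request=request)
print(operation.metadata)      # RestoreTableMetadata while the restore runs
restored = operation.result()  # blocks until done and returns a Table
print(restored.name)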
- # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_copy_backup(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.CopyBackupRequest( - parent="parent_value", - backup_id="backup_id_value", - source_backup="source_backup_value", - ) - - # Make the request - operation = client.copy_backup(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.CopyBackupRequest, dict]): - The request object. The request for - [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup]. - parent (str): - Required. The name of the destination cluster that will - contain the backup copy. The cluster must already exist. - Values are of the form: - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - backup_id (str): - Required. The id of the new backup. The ``backup_id`` - along with ``parent`` are combined as - {parent}/backups/{backup_id} to create the full backup - name, of the form: - ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``. - This string must be between 1 and 50 characters in - length and match the regex [_a-zA-Z0-9][-_.a-zA-Z0-9]*. - - This corresponds to the ``backup_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - source_backup (str): - Required. The source backup to be copied from. The - source backup needs to be in READY state for it to be - copied. Copying a copied backup is not allowed. Once - CopyBackup is in progress, the source backup cannot be - deleted or cleaned up on expiration until CopyBackup is - finished. Values are of the form: - ``projects//instances//clusters//backups/``. - - This corresponds to the ``source_backup`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - expire_time (google.protobuf.timestamp_pb2.Timestamp): - Required. The expiration time of the copied - backup with microsecond granularity that must be at - least 6 hours and at most 30 days from the time the - request is received. Once the ``expire_time`` has - passed, Cloud Bigtable will delete the backup and free - the resources used by the backup. - - This corresponds to the ``expire_time`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation.
- - The result type for the operation will be - :class:`google.cloud.bigtable_admin_v2.types.Backup` A - backup of a Cloud Bigtable table. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [parent, backup_id, source_backup, expire_time] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.CopyBackupRequest): - request = bigtable_table_admin.CopyBackupRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if backup_id is not None: - request.backup_id = backup_id - if source_backup is not None: - request.source_backup = source_backup - if expire_time is not None: - request.expire_time = expire_time - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.copy_backup] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - table.Backup, - metadata_type=bigtable_table_admin.CopyBackupMetadata, - ) - - # Done; return the response. - return response - - def get_iam_policy( - self, - request: Optional[Union[iam_policy_pb2.GetIamPolicyRequest, dict]] = None, - *, - resource: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> policy_pb2.Policy: - r"""Gets the access control policy for a Bigtable - resource. Returns an empty policy if the resource exists - but does not have a policy set. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - from google.iam.v1 import iam_policy_pb2 # type: ignore - - def sample_get_iam_policy(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = iam_policy_pb2.GetIamPolicyRequest( - resource="resource_value", - ) - - # Make the request - response = client.get_iam_policy(request=request) - - # Handle the response - print(response) - - Args: - request (Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]): - The request object. 
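# Sketch of CopyBackup as documented above: the copy carries its own
# expire_time, which must land between 6 hours and 30 days from now.  The
# resource names, backup ids, and the 10-day window are placeholder
# assumptions.
import datetime

from google.cloud import bigtable_admin_v2
from google.protobuf import timestamp_pb2

client = bigtable_admin_v2.BigtableTableAdminClient()

expire_time = timestamp_pb2.Timestamp()
expire_time.FromDatetime(
    datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(days=10)
)

operation = client.copy_backup(
    parent="projects/my-project/instances/my-instance/clusters/my-cluster",
    backup_id="my-backup-copy",
    source_backup="projects/my-project/instances/my-instance/clusters/my-cluster/backups/my-backup",
    expire_time=expire_time,
)
copied = operation.result()  # blocks until the copy completes and returns a Backup
print(copied.name)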
Request message for ``GetIamPolicy`` method. - resource (str): - REQUIRED: The resource for which the - policy is being requested. See the - operation documentation for the - appropriate value for this field. - - This corresponds to the ``resource`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.iam.v1.policy_pb2.Policy: - An Identity and Access Management (IAM) policy, which specifies access - controls for Google Cloud resources. - - A Policy is a collection of bindings. A binding binds - one or more members, or principals, to a single role. - Principals can be user accounts, service accounts, - Google groups, and domains (such as G Suite). A role - is a named list of permissions; each role can be an - IAM predefined role or a user-created custom role. - - For some types of Google Cloud resources, a binding - can also specify a condition, which is a logical - expression that allows access to a resource only if - the expression evaluates to true. A condition can add - constraints based on attributes of the request, the - resource, or both. To learn which resources support - conditions in their IAM policies, see the [IAM - documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). - - **JSON example:** - - :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` - - **YAML example:** - - :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` - - For a description of IAM and its features, see the - [IAM - documentation](\ https://cloud.google.com/iam/docs/). - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [resource] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - if isinstance(request, dict): - # - The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. 
- request = iam_policy_pb2.GetIamPolicyRequest(**request) - elif not request: - # Null request, just make one. - request = iam_policy_pb2.GetIamPolicyRequest() - if resource is not None: - request.resource = resource - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_iam_policy] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def set_iam_policy( - self, - request: Optional[Union[iam_policy_pb2.SetIamPolicyRequest, dict]] = None, - *, - resource: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> policy_pb2.Policy: - r"""Sets the access control policy on a Bigtable - resource. Replaces any existing policy. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - from google.iam.v1 import iam_policy_pb2 # type: ignore - - def sample_set_iam_policy(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = iam_policy_pb2.SetIamPolicyRequest( - resource="resource_value", - ) - - # Make the request - response = client.set_iam_policy(request=request) - - # Handle the response - print(response) - - Args: - request (Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]): - The request object. Request message for ``SetIamPolicy`` method. - resource (str): - REQUIRED: The resource for which the - policy is being specified. See the - operation documentation for the - appropriate value for this field. - - This corresponds to the ``resource`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.iam.v1.policy_pb2.Policy: - An Identity and Access Management (IAM) policy, which specifies access - controls for Google Cloud resources. - - A Policy is a collection of bindings. A binding binds - one or more members, or principals, to a single role. - Principals can be user accounts, service accounts, - Google groups, and domains (such as G Suite). A role - is a named list of permissions; each role can be an - IAM predefined role or a user-created custom role. 
- - For some types of Google Cloud resources, a binding - can also specify a condition, which is a logical - expression that allows access to a resource only if - the expression evaluates to true. A condition can add - constraints based on attributes of the request, the - resource, or both. To learn which resources support - conditions in their IAM policies, see the [IAM - documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). - - **JSON example:** - - :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` - - **YAML example:** - - :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` - - For a description of IAM and its features, see the - [IAM - documentation](\ https://cloud.google.com/iam/docs/). - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [resource] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - if isinstance(request, dict): - # - The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. - request = iam_policy_pb2.SetIamPolicyRequest(**request) - elif not request: - # Null request, just make one. - request = iam_policy_pb2.SetIamPolicyRequest() - if resource is not None: - request.resource = resource - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.set_iam_policy] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
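# Sketch of a read-modify-write on a table's IAM policy with the two methods
# above: read via get_iam_policy, append a binding, and write it back through a
# SetIamPolicyRequest (the flattened arguments only cover `resource`).  The
# table path, role, and member are placeholder assumptions.
from google.cloud import bigtable_admin_v2
from google.iam.v1 import iam_policy_pb2

client = bigtable_admin_v2.BigtableTableAdminClient()
resource = "projects/my-project/instances/my-instance/tables/my-table"

policy = client.get_iam_policy(resource=resource)
policy.bindings.add(role="roles/bigtable.reader", members=["user:someone@example.com"])

# Sending back the etag read above lets the service reject concurrent edits cleanly.
updated = client.set_iam_policy(
    request=iam_policy_pb2.SetIamPolicyRequest(resource=resource, policy=policy)
)
print(updated.etag, [binding.role for binding in updated.bindings])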
- return response - - def test_iam_permissions( - self, - request: Optional[Union[iam_policy_pb2.TestIamPermissionsRequest, dict]] = None, - *, - resource: Optional[str] = None, - permissions: Optional[MutableSequence[str]] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Returns permissions that the caller has on the - specified Bigtable resource. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - from google.iam.v1 import iam_policy_pb2 # type: ignore - - def sample_test_iam_permissions(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = iam_policy_pb2.TestIamPermissionsRequest( - resource="resource_value", - permissions=['permissions_value1', 'permissions_value2'], - ) - - # Make the request - response = client.test_iam_permissions(request=request) - - # Handle the response - print(response) - - Args: - request (Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]): - The request object. Request message for ``TestIamPermissions`` method. - resource (str): - REQUIRED: The resource for which the - policy detail is being requested. See - the operation documentation for the - appropriate value for this field. - - This corresponds to the ``resource`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - permissions (MutableSequence[str]): - The set of permissions to check for the ``resource``. - Permissions with wildcards (such as '*' or 'storage.*') - are not allowed. For more information see `IAM - Overview `__. - - This corresponds to the ``permissions`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse: - Response message for TestIamPermissions method. - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [resource, permissions] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - if isinstance(request, dict): - # - The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. 
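# Sketch of test_iam_permissions as documented above: pass candidate permission
# strings and read back the subset the caller actually holds.  The table path
# and the permission names are placeholder assumptions.
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient()

response = client.test_iam_permissions(
    resource="projects/my-project/instances/my-instance/tables/my-table",
    permissions=["bigtable.tables.readRows", "bigtable.tables.mutateRows"],
)
# response.permissions contains only the permissions that were granted.
if "bigtable.tables.mutateRows" not in response.permissions:
    print("caller has read-only access")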
- request = iam_policy_pb2.TestIamPermissionsRequest(**request) - elif not request: - # Null request, just make one. - request = iam_policy_pb2.TestIamPermissionsRequest() - if resource is not None: - request.resource = resource - if permissions: - request.permissions.extend(permissions) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def _create_schema_bundle( - self, - request: Optional[ - Union[bigtable_table_admin.CreateSchemaBundleRequest, dict] - ] = None, - *, - parent: Optional[str] = None, - schema_bundle_id: Optional[str] = None, - schema_bundle: Optional[table.SchemaBundle] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operation.Operation: - r"""Creates a new schema bundle in the specified table. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_create_schema_bundle(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - schema_bundle = bigtable_admin_v2.SchemaBundle() - schema_bundle.proto_schema.proto_descriptors = b'proto_descriptors_blob' - - request = bigtable_admin_v2.CreateSchemaBundleRequest( - parent="parent_value", - schema_bundle_id="schema_bundle_id_value", - schema_bundle=schema_bundle, - ) - - # Make the request - operation = client._create_schema_bundle(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.CreateSchemaBundleRequest, dict]): - The request object. The request for - [CreateSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.CreateSchemaBundle]. - parent (str): - Required. The parent resource where this schema bundle - will be created. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - schema_bundle_id (str): - Required. The unique ID to use for - the schema bundle, which will become the - final component of the schema bundle's - resource name. - - This corresponds to the ``schema_bundle_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - schema_bundle (google.cloud.bigtable_admin_v2.types.SchemaBundle): - Required. The schema bundle to - create. 
- - This corresponds to the ``schema_bundle`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.bigtable_admin_v2.types.SchemaBundle` - A named collection of related schemas. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [parent, schema_bundle_id, schema_bundle] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.CreateSchemaBundleRequest): - request = bigtable_table_admin.CreateSchemaBundleRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if schema_bundle_id is not None: - request.schema_bundle_id = schema_bundle_id - if schema_bundle is not None: - request.schema_bundle = schema_bundle - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_schema_bundle] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - table.SchemaBundle, - metadata_type=bigtable_table_admin.CreateSchemaBundleMetadata, - ) - - # Done; return the response. - return response - - def _update_schema_bundle( - self, - request: Optional[ - Union[bigtable_table_admin.UpdateSchemaBundleRequest, dict] - ] = None, - *, - schema_bundle: Optional[table.SchemaBundle] = None, - update_mask: Optional[field_mask_pb2.FieldMask] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operation.Operation: - r"""Updates a schema bundle in the specified table. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. 
- # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_update_schema_bundle(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - schema_bundle = bigtable_admin_v2.SchemaBundle() - schema_bundle.proto_schema.proto_descriptors = b'proto_descriptors_blob' - - request = bigtable_admin_v2.UpdateSchemaBundleRequest( - schema_bundle=schema_bundle, - ) - - # Make the request - operation = client._update_schema_bundle(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.UpdateSchemaBundleRequest, dict]): - The request object. The request for - [UpdateSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.UpdateSchemaBundle]. - schema_bundle (google.cloud.bigtable_admin_v2.types.SchemaBundle): - Required. The schema bundle to update. - - The schema bundle's ``name`` field is used to identify - the schema bundle to update. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}`` - - This corresponds to the ``schema_bundle`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Optional. The list of fields to - update. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.bigtable_admin_v2.types.SchemaBundle` - A named collection of related schemas. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [schema_bundle, update_mask] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.UpdateSchemaBundleRequest): - request = bigtable_table_admin.UpdateSchemaBundleRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if schema_bundle is not None: - request.schema_bundle = schema_bundle - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._transport._wrapped_methods[self._transport.update_schema_bundle] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("schema_bundle.name", request.schema_bundle.name),) - ), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - table.SchemaBundle, - metadata_type=bigtable_table_admin.UpdateSchemaBundleMetadata, - ) - - # Done; return the response. - return response - - def _get_schema_bundle( - self, - request: Optional[ - Union[bigtable_table_admin.GetSchemaBundleRequest, dict] - ] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> table.SchemaBundle: - r"""Gets metadata information about the specified schema - bundle. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_get_schema_bundle(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.GetSchemaBundleRequest( - name="name_value", - ) - - # Make the request - response = client._get_schema_bundle(request=request) - - # Handle the response - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.GetSchemaBundleRequest, dict]): - The request object. The request for - [GetSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.GetSchemaBundle]. - name (str): - Required. The unique name of the schema bundle to - retrieve. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.types.SchemaBundle: - A named collection of related - schemas. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- flattened_params = [name] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.GetSchemaBundleRequest): - request = bigtable_table_admin.GetSchemaBundleRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_schema_bundle] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def _list_schema_bundles( - self, - request: Optional[ - Union[bigtable_table_admin.ListSchemaBundlesRequest, dict] - ] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> pagers.ListSchemaBundlesPager: - r"""Lists all schema bundles associated with the - specified table. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_list_schema_bundles(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.ListSchemaBundlesRequest( - parent="parent_value", - ) - - # Make the request - page_result = client._list_schema_bundles(request=request) - - # Handle the response - for response in page_result: - print(response) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.ListSchemaBundlesRequest, dict]): - The request object. The request for - [ListSchemaBundles][google.bigtable.admin.v2.BigtableTableAdmin.ListSchemaBundles]. - parent (str): - Required. The parent, which owns this collection of - schema bundles. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. 
Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListSchemaBundlesPager: - The response for - [ListSchemaBundles][google.bigtable.admin.v2.BigtableTableAdmin.ListSchemaBundles]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [parent] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.ListSchemaBundlesRequest): - request = bigtable_table_admin.ListSchemaBundlesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_schema_bundles] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListSchemaBundlesPager( - method=rpc, - request=request, - response=response, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def _delete_schema_bundle( - self, - request: Optional[ - Union[bigtable_table_admin.DeleteSchemaBundleRequest, dict] - ] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> None: - r"""Deletes a schema bundle in the specified table. - - .. code-block:: python - - # This snippet has been automatically generated and should be regarded as a - # code template only. - # It will require modifications to work: - # - It may require correct/in-range values for request initialization. - # - It may require specifying regional endpoints when creating the service - # client as shown in: - # https://googleapis.dev/python/google-api-core/latest/client_options.html - from google.cloud import bigtable_admin_v2 - - def sample_delete_schema_bundle(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.DeleteSchemaBundleRequest( - name="name_value", - ) - - # Make the request - client._delete_schema_bundle(request=request) - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.DeleteSchemaBundleRequest, dict]): - The request object. 
The request for - [DeleteSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSchemaBundle]. - name (str): - Required. The unique name of the schema bundle to - delete. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - """ - # Create or coerce a protobuf request object. - # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - flattened_params = [name] - has_flattened_params = ( - len([param for param in flattened_params if param is not None]) > 0 - ) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # - Use the request object if provided (there's no risk of modifying the input as - # there are no flattened fields), or create one. - if not isinstance(request, bigtable_table_admin.DeleteSchemaBundleRequest): - request = bigtable_table_admin.DeleteSchemaBundleRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_schema_bundle] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def __enter__(self) -> "BaseBigtableTableAdminClient": - return self - - def __exit__(self, type, value, traceback): - """Releases underlying transport's resources. - - .. warning:: - ONLY use as a context manager if the transport is NOT shared - with other clients! Exiting the with block will CLOSE the transport - and may cause errors in other clients! - """ - self.transport.close() - - -DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=package_version.__version__ -) - -if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER - DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ - -__all__ = ("BaseBigtableTableAdminClient",) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py deleted file mode 100644 index e6d83ba63..000000000 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py +++ /dev/null @@ -1,829 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.api_core import retry_async as retries_async -from typing import ( - Any, - AsyncIterator, - Awaitable, - Callable, - Sequence, - Tuple, - Optional, - Iterator, - Union, -) - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] - OptionalAsyncRetry = Union[ - retries_async.AsyncRetry, gapic_v1.method._MethodDefault, None - ] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object, None] # type: ignore - OptionalAsyncRetry = Union[retries_async.AsyncRetry, object, None] # type: ignore - -from google.cloud.bigtable_admin_v2.types import bigtable_table_admin -from google.cloud.bigtable_admin_v2.types import table - - -class ListTablesPager: - """A pager for iterating through ``list_tables`` requests. - - This class thinly wraps an initial - :class:`google.cloud.bigtable_admin_v2.types.ListTablesResponse` object, and - provides an ``__iter__`` method to iterate through its - ``tables`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListTables`` requests and continue to iterate - through the ``tables`` field on the - corresponding responses. - - All the usual :class:`google.cloud.bigtable_admin_v2.types.ListTablesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - - def __init__( - self, - method: Callable[..., bigtable_table_admin.ListTablesResponse], - request: bigtable_table_admin.ListTablesRequest, - response: bigtable_table_admin.ListTablesResponse, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = () - ): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.bigtable_admin_v2.types.ListTablesRequest): - The initial request object. - response (google.cloud.bigtable_admin_v2.types.ListTablesResponse): - The initial response object. - retry (google.api_core.retry.Retry): Designation of what errors, - if any, should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. 
- """ - self._method = method - self._request = bigtable_table_admin.ListTablesRequest(request) - self._response = response - self._retry = retry - self._timeout = timeout - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[bigtable_table_admin.ListTablesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method( - self._request, - retry=self._retry, - timeout=self._timeout, - metadata=self._metadata, - ) - yield self._response - - def __iter__(self) -> Iterator[table.Table]: - for page in self.pages: - yield from page.tables - - def __repr__(self) -> str: - return "{0}<{1!r}>".format(self.__class__.__name__, self._response) - - -class ListTablesAsyncPager: - """A pager for iterating through ``list_tables`` requests. - - This class thinly wraps an initial - :class:`google.cloud.bigtable_admin_v2.types.ListTablesResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``tables`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListTables`` requests and continue to iterate - through the ``tables`` field on the - corresponding responses. - - All the usual :class:`google.cloud.bigtable_admin_v2.types.ListTablesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - - def __init__( - self, - method: Callable[..., Awaitable[bigtable_table_admin.ListTablesResponse]], - request: bigtable_table_admin.ListTablesRequest, - response: bigtable_table_admin.ListTablesResponse, - *, - retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = () - ): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.bigtable_admin_v2.types.ListTablesRequest): - The initial request object. - response (google.cloud.bigtable_admin_v2.types.ListTablesResponse): - The initial response object. - retry (google.api_core.retry.AsyncRetry): Designation of what errors, - if any, should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. 
- """ - self._method = method - self._request = bigtable_table_admin.ListTablesRequest(request) - self._response = response - self._retry = retry - self._timeout = timeout - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[bigtable_table_admin.ListTablesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method( - self._request, - retry=self._retry, - timeout=self._timeout, - metadata=self._metadata, - ) - yield self._response - - def __aiter__(self) -> AsyncIterator[table.Table]: - async def async_generator(): - async for page in self.pages: - for response in page.tables: - yield response - - return async_generator() - - def __repr__(self) -> str: - return "{0}<{1!r}>".format(self.__class__.__name__, self._response) - - -class ListAuthorizedViewsPager: - """A pager for iterating through ``list_authorized_views`` requests. - - This class thinly wraps an initial - :class:`google.cloud.bigtable_admin_v2.types.ListAuthorizedViewsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``authorized_views`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListAuthorizedViews`` requests and continue to iterate - through the ``authorized_views`` field on the - corresponding responses. - - All the usual :class:`google.cloud.bigtable_admin_v2.types.ListAuthorizedViewsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - - def __init__( - self, - method: Callable[..., bigtable_table_admin.ListAuthorizedViewsResponse], - request: bigtable_table_admin.ListAuthorizedViewsRequest, - response: bigtable_table_admin.ListAuthorizedViewsResponse, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = () - ): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.bigtable_admin_v2.types.ListAuthorizedViewsRequest): - The initial request object. - response (google.cloud.bigtable_admin_v2.types.ListAuthorizedViewsResponse): - The initial response object. - retry (google.api_core.retry.Retry): Designation of what errors, - if any, should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. 
- """ - self._method = method - self._request = bigtable_table_admin.ListAuthorizedViewsRequest(request) - self._response = response - self._retry = retry - self._timeout = timeout - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[bigtable_table_admin.ListAuthorizedViewsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method( - self._request, - retry=self._retry, - timeout=self._timeout, - metadata=self._metadata, - ) - yield self._response - - def __iter__(self) -> Iterator[table.AuthorizedView]: - for page in self.pages: - yield from page.authorized_views - - def __repr__(self) -> str: - return "{0}<{1!r}>".format(self.__class__.__name__, self._response) - - -class ListAuthorizedViewsAsyncPager: - """A pager for iterating through ``list_authorized_views`` requests. - - This class thinly wraps an initial - :class:`google.cloud.bigtable_admin_v2.types.ListAuthorizedViewsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``authorized_views`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListAuthorizedViews`` requests and continue to iterate - through the ``authorized_views`` field on the - corresponding responses. - - All the usual :class:`google.cloud.bigtable_admin_v2.types.ListAuthorizedViewsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - - def __init__( - self, - method: Callable[ - ..., Awaitable[bigtable_table_admin.ListAuthorizedViewsResponse] - ], - request: bigtable_table_admin.ListAuthorizedViewsRequest, - response: bigtable_table_admin.ListAuthorizedViewsResponse, - *, - retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = () - ): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.bigtable_admin_v2.types.ListAuthorizedViewsRequest): - The initial request object. - response (google.cloud.bigtable_admin_v2.types.ListAuthorizedViewsResponse): - The initial response object. - retry (google.api_core.retry.AsyncRetry): Designation of what errors, - if any, should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. 
- """ - self._method = method - self._request = bigtable_table_admin.ListAuthorizedViewsRequest(request) - self._response = response - self._retry = retry - self._timeout = timeout - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages( - self, - ) -> AsyncIterator[bigtable_table_admin.ListAuthorizedViewsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method( - self._request, - retry=self._retry, - timeout=self._timeout, - metadata=self._metadata, - ) - yield self._response - - def __aiter__(self) -> AsyncIterator[table.AuthorizedView]: - async def async_generator(): - async for page in self.pages: - for response in page.authorized_views: - yield response - - return async_generator() - - def __repr__(self) -> str: - return "{0}<{1!r}>".format(self.__class__.__name__, self._response) - - -class ListSnapshotsPager: - """A pager for iterating through ``list_snapshots`` requests. - - This class thinly wraps an initial - :class:`google.cloud.bigtable_admin_v2.types.ListSnapshotsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``snapshots`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListSnapshots`` requests and continue to iterate - through the ``snapshots`` field on the - corresponding responses. - - All the usual :class:`google.cloud.bigtable_admin_v2.types.ListSnapshotsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - - def __init__( - self, - method: Callable[..., bigtable_table_admin.ListSnapshotsResponse], - request: bigtable_table_admin.ListSnapshotsRequest, - response: bigtable_table_admin.ListSnapshotsResponse, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = () - ): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest): - The initial request object. - response (google.cloud.bigtable_admin_v2.types.ListSnapshotsResponse): - The initial response object. - retry (google.api_core.retry.Retry): Designation of what errors, - if any, should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. 
- """ - self._method = method - self._request = bigtable_table_admin.ListSnapshotsRequest(request) - self._response = response - self._retry = retry - self._timeout = timeout - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[bigtable_table_admin.ListSnapshotsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method( - self._request, - retry=self._retry, - timeout=self._timeout, - metadata=self._metadata, - ) - yield self._response - - def __iter__(self) -> Iterator[table.Snapshot]: - for page in self.pages: - yield from page.snapshots - - def __repr__(self) -> str: - return "{0}<{1!r}>".format(self.__class__.__name__, self._response) - - -class ListSnapshotsAsyncPager: - """A pager for iterating through ``list_snapshots`` requests. - - This class thinly wraps an initial - :class:`google.cloud.bigtable_admin_v2.types.ListSnapshotsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``snapshots`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListSnapshots`` requests and continue to iterate - through the ``snapshots`` field on the - corresponding responses. - - All the usual :class:`google.cloud.bigtable_admin_v2.types.ListSnapshotsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - - def __init__( - self, - method: Callable[..., Awaitable[bigtable_table_admin.ListSnapshotsResponse]], - request: bigtable_table_admin.ListSnapshotsRequest, - response: bigtable_table_admin.ListSnapshotsResponse, - *, - retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = () - ): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest): - The initial request object. - response (google.cloud.bigtable_admin_v2.types.ListSnapshotsResponse): - The initial response object. - retry (google.api_core.retry.AsyncRetry): Designation of what errors, - if any, should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. 
- """ - self._method = method - self._request = bigtable_table_admin.ListSnapshotsRequest(request) - self._response = response - self._retry = retry - self._timeout = timeout - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[bigtable_table_admin.ListSnapshotsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method( - self._request, - retry=self._retry, - timeout=self._timeout, - metadata=self._metadata, - ) - yield self._response - - def __aiter__(self) -> AsyncIterator[table.Snapshot]: - async def async_generator(): - async for page in self.pages: - for response in page.snapshots: - yield response - - return async_generator() - - def __repr__(self) -> str: - return "{0}<{1!r}>".format(self.__class__.__name__, self._response) - - -class ListBackupsPager: - """A pager for iterating through ``list_backups`` requests. - - This class thinly wraps an initial - :class:`google.cloud.bigtable_admin_v2.types.ListBackupsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``backups`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListBackups`` requests and continue to iterate - through the ``backups`` field on the - corresponding responses. - - All the usual :class:`google.cloud.bigtable_admin_v2.types.ListBackupsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - - def __init__( - self, - method: Callable[..., bigtable_table_admin.ListBackupsResponse], - request: bigtable_table_admin.ListBackupsRequest, - response: bigtable_table_admin.ListBackupsResponse, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = () - ): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.bigtable_admin_v2.types.ListBackupsRequest): - The initial request object. - response (google.cloud.bigtable_admin_v2.types.ListBackupsResponse): - The initial response object. - retry (google.api_core.retry.Retry): Designation of what errors, - if any, should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. 
- """ - self._method = method - self._request = bigtable_table_admin.ListBackupsRequest(request) - self._response = response - self._retry = retry - self._timeout = timeout - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[bigtable_table_admin.ListBackupsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method( - self._request, - retry=self._retry, - timeout=self._timeout, - metadata=self._metadata, - ) - yield self._response - - def __iter__(self) -> Iterator[table.Backup]: - for page in self.pages: - yield from page.backups - - def __repr__(self) -> str: - return "{0}<{1!r}>".format(self.__class__.__name__, self._response) - - -class ListBackupsAsyncPager: - """A pager for iterating through ``list_backups`` requests. - - This class thinly wraps an initial - :class:`google.cloud.bigtable_admin_v2.types.ListBackupsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``backups`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListBackups`` requests and continue to iterate - through the ``backups`` field on the - corresponding responses. - - All the usual :class:`google.cloud.bigtable_admin_v2.types.ListBackupsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - - def __init__( - self, - method: Callable[..., Awaitable[bigtable_table_admin.ListBackupsResponse]], - request: bigtable_table_admin.ListBackupsRequest, - response: bigtable_table_admin.ListBackupsResponse, - *, - retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = () - ): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.bigtable_admin_v2.types.ListBackupsRequest): - The initial request object. - response (google.cloud.bigtable_admin_v2.types.ListBackupsResponse): - The initial response object. - retry (google.api_core.retry.AsyncRetry): Designation of what errors, - if any, should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. 
- """ - self._method = method - self._request = bigtable_table_admin.ListBackupsRequest(request) - self._response = response - self._retry = retry - self._timeout = timeout - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[bigtable_table_admin.ListBackupsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method( - self._request, - retry=self._retry, - timeout=self._timeout, - metadata=self._metadata, - ) - yield self._response - - def __aiter__(self) -> AsyncIterator[table.Backup]: - async def async_generator(): - async for page in self.pages: - for response in page.backups: - yield response - - return async_generator() - - def __repr__(self) -> str: - return "{0}<{1!r}>".format(self.__class__.__name__, self._response) - - -class ListSchemaBundlesPager: - """A pager for iterating through ``list_schema_bundles`` requests. - - This class thinly wraps an initial - :class:`google.cloud.bigtable_admin_v2.types.ListSchemaBundlesResponse` object, and - provides an ``__iter__`` method to iterate through its - ``schema_bundles`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListSchemaBundles`` requests and continue to iterate - through the ``schema_bundles`` field on the - corresponding responses. - - All the usual :class:`google.cloud.bigtable_admin_v2.types.ListSchemaBundlesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - - def __init__( - self, - method: Callable[..., bigtable_table_admin.ListSchemaBundlesResponse], - request: bigtable_table_admin.ListSchemaBundlesRequest, - response: bigtable_table_admin.ListSchemaBundlesResponse, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = () - ): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.bigtable_admin_v2.types.ListSchemaBundlesRequest): - The initial request object. - response (google.cloud.bigtable_admin_v2.types.ListSchemaBundlesResponse): - The initial response object. - retry (google.api_core.retry.Retry): Designation of what errors, - if any, should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. 
- """ - self._method = method - self._request = bigtable_table_admin.ListSchemaBundlesRequest(request) - self._response = response - self._retry = retry - self._timeout = timeout - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[bigtable_table_admin.ListSchemaBundlesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method( - self._request, - retry=self._retry, - timeout=self._timeout, - metadata=self._metadata, - ) - yield self._response - - def __iter__(self) -> Iterator[table.SchemaBundle]: - for page in self.pages: - yield from page.schema_bundles - - def __repr__(self) -> str: - return "{0}<{1!r}>".format(self.__class__.__name__, self._response) - - -class ListSchemaBundlesAsyncPager: - """A pager for iterating through ``list_schema_bundles`` requests. - - This class thinly wraps an initial - :class:`google.cloud.bigtable_admin_v2.types.ListSchemaBundlesResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``schema_bundles`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListSchemaBundles`` requests and continue to iterate - through the ``schema_bundles`` field on the - corresponding responses. - - All the usual :class:`google.cloud.bigtable_admin_v2.types.ListSchemaBundlesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - - def __init__( - self, - method: Callable[ - ..., Awaitable[bigtable_table_admin.ListSchemaBundlesResponse] - ], - request: bigtable_table_admin.ListSchemaBundlesRequest, - response: bigtable_table_admin.ListSchemaBundlesResponse, - *, - retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = () - ): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.bigtable_admin_v2.types.ListSchemaBundlesRequest): - The initial request object. - response (google.cloud.bigtable_admin_v2.types.ListSchemaBundlesResponse): - The initial response object. - retry (google.api_core.retry.AsyncRetry): Designation of what errors, - if any, should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. 
- """ - self._method = method - self._request = bigtable_table_admin.ListSchemaBundlesRequest(request) - self._response = response - self._retry = retry - self._timeout = timeout - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages( - self, - ) -> AsyncIterator[bigtable_table_admin.ListSchemaBundlesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method( - self._request, - retry=self._retry, - timeout=self._timeout, - metadata=self._metadata, - ) - yield self._response - - def __aiter__(self) -> AsyncIterator[table.SchemaBundle]: - async def async_generator(): - async for page in self.pages: - for response in page.schema_bundles: - yield response - - return async_generator() - - def __repr__(self) -> str: - return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/README.rst b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/README.rst deleted file mode 100644 index 0e8f40ec3..000000000 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/README.rst +++ /dev/null @@ -1,9 +0,0 @@ - -transport inheritance structure -_______________________________ - -`BigtableTableAdminTransport` is the ABC for all transports. -- public child `BigtableTableAdminGrpcTransport` for sync gRPC transport (defined in `grpc.py`). -- public child `BigtableTableAdminGrpcAsyncIOTransport` for async gRPC transport (defined in `grpc_asyncio.py`). -- private child `_BaseBigtableTableAdminRestTransport` for base REST transport with inner classes `_BaseMETHOD` (defined in `rest_base.py`). -- public child `BigtableTableAdminRestTransport` for sync REST transport with inner classes `METHOD` derived from the parent's corresponding `_BaseMETHOD` classes (defined in `rest.py`). diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py deleted file mode 100644 index e7621f781..000000000 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py +++ /dev/null @@ -1,40 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import BigtableTableAdminTransport -from .grpc import BigtableTableAdminGrpcTransport -from .grpc_asyncio import BigtableTableAdminGrpcAsyncIOTransport -from .rest import BigtableTableAdminRestTransport -from .rest import BigtableTableAdminRestInterceptor - - -# Compile a registry of transports. 
-_transport_registry = ( - OrderedDict() -) # type: Dict[str, Type[BigtableTableAdminTransport]] -_transport_registry["grpc"] = BigtableTableAdminGrpcTransport -_transport_registry["grpc_asyncio"] = BigtableTableAdminGrpcAsyncIOTransport -_transport_registry["rest"] = BigtableTableAdminRestTransport - -__all__ = ( - "BigtableTableAdminTransport", - "BigtableTableAdminGrpcTransport", - "BigtableTableAdminGrpcAsyncIOTransport", - "BigtableTableAdminRestTransport", - "BigtableTableAdminRestInterceptor", -) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py deleted file mode 100644 index 8e2cb7304..000000000 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py +++ /dev/null @@ -1,785 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union - -from google.cloud.bigtable_admin_v2 import gapic_version as package_version - -import google.auth # type: ignore -import google.api_core -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore -import google.protobuf - -from google.cloud.bigtable_admin_v2.types import bigtable_table_admin -from google.cloud.bigtable_admin_v2.types import table -from google.cloud.bigtable_admin_v2.types import table as gba_table -from google.iam.v1 import iam_policy_pb2 # type: ignore -from google.iam.v1 import policy_pb2 # type: ignore -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore - -DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=package_version.__version__ -) - -if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER - DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ - - -class BigtableTableAdminTransport(abc.ABC): - """Abstract transport class for BigtableTableAdmin.""" - - AUTH_SCOPES = ( - "https://www.googleapis.com/auth/bigtable.admin", - "https://www.googleapis.com/auth/bigtable.admin.table", - "https://www.googleapis.com/auth/cloud-bigtable.admin", - "https://www.googleapis.com/auth/cloud-bigtable.admin.table", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - ) - - DEFAULT_HOST: str = "bigtableadmin.googleapis.com" - - def __init__( - self, - *, - host: str = DEFAULT_HOST, - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = 
DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - api_audience: Optional[str] = None, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to (default: 'bigtableadmin.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - - # Save the scopes. - self._scopes = scopes - if not hasattr(self, "_ignore_credentials"): - self._ignore_credentials: bool = False - - # If no credentials are provided, then determine the appropriate - # defaults. - if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs( - "'credentials_file' and 'credentials' are mutually exclusive" - ) - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id - ) - elif credentials is None and not self._ignore_credentials: - credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id - ) - # Don't apply audience if the credentials file passed from user. - if hasattr(credentials, "with_gdch_audience"): - credentials = credentials.with_gdch_audience( - api_audience if api_audience else host - ) - - # If the credentials are service account credentials, then always try to use self signed JWT. - if ( - always_use_jwt_access - and isinstance(credentials, service_account.Credentials) - and hasattr(service_account.Credentials, "with_always_use_jwt_access") - ): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ":" not in host: - host += ":443" - self._host = host - - @property - def host(self): - return self._host - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. 
- self._wrapped_methods = { - self.create_table: gapic_v1.method.wrap_method( - self.create_table, - default_timeout=300.0, - client_info=client_info, - ), - self.create_table_from_snapshot: gapic_v1.method.wrap_method( - self.create_table_from_snapshot, - default_timeout=None, - client_info=client_info, - ), - self.list_tables: gapic_v1.method.wrap_method( - self.list_tables, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.get_table: gapic_v1.method.wrap_method( - self.get_table, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.update_table: gapic_v1.method.wrap_method( - self.update_table, - default_timeout=None, - client_info=client_info, - ), - self.delete_table: gapic_v1.method.wrap_method( - self.delete_table, - default_timeout=300.0, - client_info=client_info, - ), - self.undelete_table: gapic_v1.method.wrap_method( - self.undelete_table, - default_timeout=None, - client_info=client_info, - ), - self.create_authorized_view: gapic_v1.method.wrap_method( - self.create_authorized_view, - default_timeout=None, - client_info=client_info, - ), - self.list_authorized_views: gapic_v1.method.wrap_method( - self.list_authorized_views, - default_timeout=None, - client_info=client_info, - ), - self.get_authorized_view: gapic_v1.method.wrap_method( - self.get_authorized_view, - default_timeout=None, - client_info=client_info, - ), - self.update_authorized_view: gapic_v1.method.wrap_method( - self.update_authorized_view, - default_timeout=None, - client_info=client_info, - ), - self.delete_authorized_view: gapic_v1.method.wrap_method( - self.delete_authorized_view, - default_timeout=None, - client_info=client_info, - ), - self.modify_column_families: gapic_v1.method.wrap_method( - self.modify_column_families, - default_timeout=300.0, - client_info=client_info, - ), - self.drop_row_range: gapic_v1.method.wrap_method( - self.drop_row_range, - default_timeout=3600.0, - client_info=client_info, - ), - self.generate_consistency_token: gapic_v1.method.wrap_method( - self.generate_consistency_token, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.check_consistency: gapic_v1.method.wrap_method( - self.check_consistency, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=3600.0, - ), - default_timeout=3600.0, - client_info=client_info, - ), - self.snapshot_table: gapic_v1.method.wrap_method( - self.snapshot_table, - default_timeout=None, - client_info=client_info, - ), - self.get_snapshot: gapic_v1.method.wrap_method( - self.get_snapshot, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - 
default_timeout=60.0, - client_info=client_info, - ), - self.list_snapshots: gapic_v1.method.wrap_method( - self.list_snapshots, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.delete_snapshot: gapic_v1.method.wrap_method( - self.delete_snapshot, - default_timeout=300.0, - client_info=client_info, - ), - self.create_backup: gapic_v1.method.wrap_method( - self.create_backup, - default_timeout=60.0, - client_info=client_info, - ), - self.get_backup: gapic_v1.method.wrap_method( - self.get_backup, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.update_backup: gapic_v1.method.wrap_method( - self.update_backup, - default_timeout=60.0, - client_info=client_info, - ), - self.delete_backup: gapic_v1.method.wrap_method( - self.delete_backup, - default_timeout=300.0, - client_info=client_info, - ), - self.list_backups: gapic_v1.method.wrap_method( - self.list_backups, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.restore_table: gapic_v1.method.wrap_method( - self.restore_table, - default_timeout=60.0, - client_info=client_info, - ), - self.copy_backup: gapic_v1.method.wrap_method( - self.copy_backup, - default_timeout=None, - client_info=client_info, - ), - self.get_iam_policy: gapic_v1.method.wrap_method( - self.get_iam_policy, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.set_iam_policy: gapic_v1.method.wrap_method( - self.set_iam_policy, - default_timeout=60.0, - client_info=client_info, - ), - self.test_iam_permissions: gapic_v1.method.wrap_method( - self.test_iam_permissions, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.create_schema_bundle: gapic_v1.method.wrap_method( - self.create_schema_bundle, - default_timeout=None, - client_info=client_info, - ), - self.update_schema_bundle: gapic_v1.method.wrap_method( - self.update_schema_bundle, - default_timeout=None, - client_info=client_info, - ), - self.get_schema_bundle: gapic_v1.method.wrap_method( - self.get_schema_bundle, - default_timeout=None, - client_info=client_info, - ), - self.list_schema_bundles: gapic_v1.method.wrap_method( - self.list_schema_bundles, - default_timeout=None, - client_info=client_info, - ), - self.delete_schema_bundle: gapic_v1.method.wrap_method( - self.delete_schema_bundle, - default_timeout=None, - client_info=client_info, - ), - } - - def close(self): - """Closes resources associated with the transport. - - .. 
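The wrapped-method table above gives each RPC its own defaults; for example `list_tables` and `get_table` retry `DeadlineExceeded` and `ServiceUnavailable` with exponential backoff (1s initial delay, multiplier 2, 60s cap) under a 60s overall deadline. A sketch of the same `google.api_core` retry policy applied to a hypothetical callable, assuming `google-api-core` is installed:

# Sketch: the exponential-backoff retry policy used for list_tables/get_table
# above, applied to a hypothetical flaky callable.
from google.api_core import exceptions as core_exceptions
from google.api_core import retry as retries

attempts = {"n": 0}


def flaky_call() -> str:
    # Fails twice with a retriable error, then succeeds.
    attempts["n"] += 1
    if attempts["n"] < 3:
        raise core_exceptions.ServiceUnavailable("transient outage")
    return "ok"


retry_policy = retries.Retry(
    initial=1.0,   # first backoff, in seconds
    maximum=60.0,  # backoff ceiling
    multiplier=2,  # exponential growth factor
    predicate=retries.if_exception_type(
        core_exceptions.DeadlineExceeded,
        core_exceptions.ServiceUnavailable,
    ),
    deadline=60.0,  # give up after 60 seconds overall
)

print(retry_policy(flaky_call)())  # -> "ok" after two retried failures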
warning:: - Only call this method if the transport is NOT shared - with other clients - this may cause errors in other clients! - """ - raise NotImplementedError() - - @property - def operations_client(self): - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def create_table( - self, - ) -> Callable[ - [bigtable_table_admin.CreateTableRequest], - Union[gba_table.Table, Awaitable[gba_table.Table]], - ]: - raise NotImplementedError() - - @property - def create_table_from_snapshot( - self, - ) -> Callable[ - [bigtable_table_admin.CreateTableFromSnapshotRequest], - Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], - ]: - raise NotImplementedError() - - @property - def list_tables( - self, - ) -> Callable[ - [bigtable_table_admin.ListTablesRequest], - Union[ - bigtable_table_admin.ListTablesResponse, - Awaitable[bigtable_table_admin.ListTablesResponse], - ], - ]: - raise NotImplementedError() - - @property - def get_table( - self, - ) -> Callable[ - [bigtable_table_admin.GetTableRequest], - Union[table.Table, Awaitable[table.Table]], - ]: - raise NotImplementedError() - - @property - def update_table( - self, - ) -> Callable[ - [bigtable_table_admin.UpdateTableRequest], - Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], - ]: - raise NotImplementedError() - - @property - def delete_table( - self, - ) -> Callable[ - [bigtable_table_admin.DeleteTableRequest], - Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]], - ]: - raise NotImplementedError() - - @property - def undelete_table( - self, - ) -> Callable[ - [bigtable_table_admin.UndeleteTableRequest], - Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], - ]: - raise NotImplementedError() - - @property - def create_authorized_view( - self, - ) -> Callable[ - [bigtable_table_admin.CreateAuthorizedViewRequest], - Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], - ]: - raise NotImplementedError() - - @property - def list_authorized_views( - self, - ) -> Callable[ - [bigtable_table_admin.ListAuthorizedViewsRequest], - Union[ - bigtable_table_admin.ListAuthorizedViewsResponse, - Awaitable[bigtable_table_admin.ListAuthorizedViewsResponse], - ], - ]: - raise NotImplementedError() - - @property - def get_authorized_view( - self, - ) -> Callable[ - [bigtable_table_admin.GetAuthorizedViewRequest], - Union[table.AuthorizedView, Awaitable[table.AuthorizedView]], - ]: - raise NotImplementedError() - - @property - def update_authorized_view( - self, - ) -> Callable[ - [bigtable_table_admin.UpdateAuthorizedViewRequest], - Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], - ]: - raise NotImplementedError() - - @property - def delete_authorized_view( - self, - ) -> Callable[ - [bigtable_table_admin.DeleteAuthorizedViewRequest], - Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]], - ]: - raise NotImplementedError() - - @property - def modify_column_families( - self, - ) -> Callable[ - [bigtable_table_admin.ModifyColumnFamiliesRequest], - Union[table.Table, Awaitable[table.Table]], - ]: - raise NotImplementedError() - - @property - def drop_row_range( - self, - ) -> Callable[ - [bigtable_table_admin.DropRowRangeRequest], - Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]], - ]: - raise NotImplementedError() - - @property - def generate_consistency_token( - self, - ) -> Callable[ - [bigtable_table_admin.GenerateConsistencyTokenRequest], - Union[ - 
bigtable_table_admin.GenerateConsistencyTokenResponse, - Awaitable[bigtable_table_admin.GenerateConsistencyTokenResponse], - ], - ]: - raise NotImplementedError() - - @property - def check_consistency( - self, - ) -> Callable[ - [bigtable_table_admin.CheckConsistencyRequest], - Union[ - bigtable_table_admin.CheckConsistencyResponse, - Awaitable[bigtable_table_admin.CheckConsistencyResponse], - ], - ]: - raise NotImplementedError() - - @property - def snapshot_table( - self, - ) -> Callable[ - [bigtable_table_admin.SnapshotTableRequest], - Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], - ]: - raise NotImplementedError() - - @property - def get_snapshot( - self, - ) -> Callable[ - [bigtable_table_admin.GetSnapshotRequest], - Union[table.Snapshot, Awaitable[table.Snapshot]], - ]: - raise NotImplementedError() - - @property - def list_snapshots( - self, - ) -> Callable[ - [bigtable_table_admin.ListSnapshotsRequest], - Union[ - bigtable_table_admin.ListSnapshotsResponse, - Awaitable[bigtable_table_admin.ListSnapshotsResponse], - ], - ]: - raise NotImplementedError() - - @property - def delete_snapshot( - self, - ) -> Callable[ - [bigtable_table_admin.DeleteSnapshotRequest], - Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]], - ]: - raise NotImplementedError() - - @property - def create_backup( - self, - ) -> Callable[ - [bigtable_table_admin.CreateBackupRequest], - Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], - ]: - raise NotImplementedError() - - @property - def get_backup( - self, - ) -> Callable[ - [bigtable_table_admin.GetBackupRequest], - Union[table.Backup, Awaitable[table.Backup]], - ]: - raise NotImplementedError() - - @property - def update_backup( - self, - ) -> Callable[ - [bigtable_table_admin.UpdateBackupRequest], - Union[table.Backup, Awaitable[table.Backup]], - ]: - raise NotImplementedError() - - @property - def delete_backup( - self, - ) -> Callable[ - [bigtable_table_admin.DeleteBackupRequest], - Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]], - ]: - raise NotImplementedError() - - @property - def list_backups( - self, - ) -> Callable[ - [bigtable_table_admin.ListBackupsRequest], - Union[ - bigtable_table_admin.ListBackupsResponse, - Awaitable[bigtable_table_admin.ListBackupsResponse], - ], - ]: - raise NotImplementedError() - - @property - def restore_table( - self, - ) -> Callable[ - [bigtable_table_admin.RestoreTableRequest], - Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], - ]: - raise NotImplementedError() - - @property - def copy_backup( - self, - ) -> Callable[ - [bigtable_table_admin.CopyBackupRequest], - Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], - ]: - raise NotImplementedError() - - @property - def get_iam_policy( - self, - ) -> Callable[ - [iam_policy_pb2.GetIamPolicyRequest], - Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], - ]: - raise NotImplementedError() - - @property - def set_iam_policy( - self, - ) -> Callable[ - [iam_policy_pb2.SetIamPolicyRequest], - Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], - ]: - raise NotImplementedError() - - @property - def test_iam_permissions( - self, - ) -> Callable[ - [iam_policy_pb2.TestIamPermissionsRequest], - Union[ - iam_policy_pb2.TestIamPermissionsResponse, - Awaitable[iam_policy_pb2.TestIamPermissionsResponse], - ], - ]: - raise NotImplementedError() - - @property - def create_schema_bundle( - self, - ) -> Callable[ - [bigtable_table_admin.CreateSchemaBundleRequest], - 
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], - ]: - raise NotImplementedError() - - @property - def update_schema_bundle( - self, - ) -> Callable[ - [bigtable_table_admin.UpdateSchemaBundleRequest], - Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], - ]: - raise NotImplementedError() - - @property - def get_schema_bundle( - self, - ) -> Callable[ - [bigtable_table_admin.GetSchemaBundleRequest], - Union[table.SchemaBundle, Awaitable[table.SchemaBundle]], - ]: - raise NotImplementedError() - - @property - def list_schema_bundles( - self, - ) -> Callable[ - [bigtable_table_admin.ListSchemaBundlesRequest], - Union[ - bigtable_table_admin.ListSchemaBundlesResponse, - Awaitable[bigtable_table_admin.ListSchemaBundlesResponse], - ], - ]: - raise NotImplementedError() - - @property - def delete_schema_bundle( - self, - ) -> Callable[ - [bigtable_table_admin.DeleteSchemaBundleRequest], - Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]], - ]: - raise NotImplementedError() - - @property - def kind(self) -> str: - raise NotImplementedError() - - -__all__ = ("BigtableTableAdminTransport",) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py deleted file mode 100644 index 5f46c3aa3..000000000 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py +++ /dev/null @@ -1,1395 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
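base.py models each RPC as a property that returns a callable, annotated `Union[X, Awaitable[X]]` so the same abstract surface covers both sync and async transports; concrete transports override the properties, while the base class only raises `NotImplementedError`. A toy sketch of that shape (`ToyTransport` and `EchoTransport` are illustrative names):

# Sketch of the abstract-transport pattern used in base.py: each RPC is a
# property returning a callable, and concrete transports override it.
import abc
from typing import Awaitable, Callable, Union


class ToyTransport(abc.ABC):
    @property
    def echo(self) -> Callable[[str], Union[str, Awaitable[str]]]:
        # Sync transports return str; async transports return Awaitable[str].
        raise NotImplementedError()


class EchoTransport(ToyTransport):
    @property
    def echo(self) -> Callable[[str], str]:
        def _call(request: str) -> str:
            return f"echo: {request}"

        return _call


print(EchoTransport().echo("hello"))  # -> "echo: hello"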
-# -import json -import logging as std_logging -import pickle -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers -from google.api_core import operations_v1 -from google.api_core import gapic_v1 -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.protobuf.json_format import MessageToJson -import google.protobuf.message - -import grpc # type: ignore -import proto # type: ignore - -from google.cloud.bigtable_admin_v2.types import bigtable_table_admin -from google.cloud.bigtable_admin_v2.types import table -from google.cloud.bigtable_admin_v2.types import table as gba_table -from google.iam.v1 import iam_policy_pb2 # type: ignore -from google.iam.v1 import policy_pb2 # type: ignore -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from .base import BigtableTableAdminTransport, DEFAULT_CLIENT_INFO - -try: - from google.api_core import client_logging # type: ignore - - CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER -except ImportError: # pragma: NO COVER - CLIENT_LOGGING_SUPPORTED = False - -_LOGGER = std_logging.getLogger(__name__) - - -class _LoggingClientInterceptor(grpc.UnaryUnaryClientInterceptor): # pragma: NO COVER - def intercept_unary_unary(self, continuation, client_call_details, request): - logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - std_logging.DEBUG - ) - if logging_enabled: # pragma: NO COVER - request_metadata = client_call_details.metadata - if isinstance(request, proto.Message): - request_payload = type(request).to_json(request) - elif isinstance(request, google.protobuf.message.Message): - request_payload = MessageToJson(request) - else: - request_payload = f"{type(request).__name__}: {pickle.dumps(request)}" - - request_metadata = { - key: value.decode("utf-8") if isinstance(value, bytes) else value - for key, value in request_metadata - } - grpc_request = { - "payload": request_payload, - "requestMethod": "grpc", - "metadata": dict(request_metadata), - } - _LOGGER.debug( - f"Sending request for {client_call_details.method}", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": str(client_call_details.method), - "request": grpc_request, - "metadata": grpc_request["metadata"], - }, - ) - response = continuation(client_call_details, request) - if logging_enabled: # pragma: NO COVER - response_metadata = response.trailing_metadata() - # Convert gRPC metadata `` to list of tuples - metadata = ( - dict([(k, str(v)) for k, v in response_metadata]) - if response_metadata - else None - ) - result = response.result() - if isinstance(result, proto.Message): - response_payload = type(result).to_json(result) - elif isinstance(result, google.protobuf.message.Message): - response_payload = MessageToJson(result) - else: - response_payload = f"{type(result).__name__}: {pickle.dumps(result)}" - grpc_response = { - "payload": response_payload, - "metadata": metadata, - "status": "OK", - } - _LOGGER.debug( - f"Received response for {client_call_details.method}.", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": client_call_details.method, - "response": grpc_response, - "metadata": grpc_response["metadata"], - }, - ) - return response - - -class BigtableTableAdminGrpcTransport(BigtableTableAdminTransport): - """gRPC 
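The `_LoggingClientInterceptor` above wraps every unary-unary call: it logs the outgoing request, delegates to `continuation`, and then logs the returned response, and the transport wires it in with `grpc.intercept_channel`. A minimal sketch of that interceptor pattern, assuming `grpcio` is installed; the localhost address is a placeholder and no RPC is actually issued:

# Sketch of the unary-unary client interceptor pattern shown above:
# log each outgoing call, delegate to `continuation`, then log the result.
import logging

import grpc

_LOGGER = logging.getLogger(__name__)


class LoggingInterceptor(grpc.UnaryUnaryClientInterceptor):
    def intercept_unary_unary(self, continuation, client_call_details, request):
        _LOGGER.debug("Sending request for %s", client_call_details.method)
        response = continuation(client_call_details, request)
        _LOGGER.debug("Received response for %s", client_call_details.method)
        return response


channel = grpc.insecure_channel("localhost:50051")  # placeholder endpoint
logged_channel = grpc.intercept_channel(channel, LoggingInterceptor())
# `logged_channel` can now back any generated stub; every unary-unary call
# passes through intercept_unary_unary before reaching the wire.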
backend transport for BigtableTableAdmin. - - Service for creating, configuring, and deleting Cloud - Bigtable tables. - - Provides access to the table schemas only, not the data stored - within the tables. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _stubs: Dict[str, Callable] - - def __init__( - self, - *, - host: str = "bigtableadmin.googleapis.com", - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]] = None, - api_mtls_endpoint: Optional[str] = None, - client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, - ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, - client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - api_audience: Optional[str] = None, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to (default: 'bigtableadmin.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if a ``channel`` instance is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if a ``channel`` instance is provided. - channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]): - A ``Channel`` instance through which to make calls, or a Callable - that constructs and returns one. If set to None, ``self.create_channel`` - is used to create the channel. If a Callable is given, it will be called - with the same arguments as used in ``self.create_channel``. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if a ``channel`` instance is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. 
- client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if isinstance(channel, grpc.Channel): - # Ignore credentials if a channel was passed. - credentials = None - self._ignore_credentials = True - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - api_audience=api_audience, - ) - - if not self._grpc_channel: - # initialize with the provided callable or the default channel - channel_init = channel or type(self).create_channel - self._grpc_channel = channel_init( - self._host, - # use the credentials which are saved - credentials=self._credentials, - # Set ``credentials_file`` to ``None`` here as - # the credentials that we saved earlier should be used. - credentials_file=None, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - self._interceptor = _LoggingClientInterceptor() - self._logged_channel = grpc.intercept_channel( - self._grpc_channel, self._interceptor - ) - - # Wrap messages. This must be done after self._logged_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel( - cls, - host: str = "bigtableadmin.googleapis.com", - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs, - ) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. 
- credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs, - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service.""" - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Quick check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self._logged_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_table( - self, - ) -> Callable[[bigtable_table_admin.CreateTableRequest], gba_table.Table]: - r"""Return a callable for the create table method over gRPC. - - Creates a new table in the specified instance. - The table can be created with a full set of initial - column families, specified in the request. - - Returns: - Callable[[~.CreateTableRequest], - ~.Table]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "create_table" not in self._stubs: - self._stubs["create_table"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable", - request_serializer=bigtable_table_admin.CreateTableRequest.serialize, - response_deserializer=gba_table.Table.deserialize, - ) - return self._stubs["create_table"] - - @property - def create_table_from_snapshot( - self, - ) -> Callable[ - [bigtable_table_admin.CreateTableFromSnapshotRequest], operations_pb2.Operation - ]: - r"""Return a callable for the create table from snapshot method over gRPC. - - Creates a new table from the specified snapshot. The - target table must not exist. The snapshot and the table - must be in the same instance. - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. 
It is not subject to any - SLA or deprecation policy. - - Returns: - Callable[[~.CreateTableFromSnapshotRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "create_table_from_snapshot" not in self._stubs: - self._stubs[ - "create_table_from_snapshot" - ] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTableFromSnapshot", - request_serializer=bigtable_table_admin.CreateTableFromSnapshotRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs["create_table_from_snapshot"] - - @property - def list_tables( - self, - ) -> Callable[ - [bigtable_table_admin.ListTablesRequest], - bigtable_table_admin.ListTablesResponse, - ]: - r"""Return a callable for the list tables method over gRPC. - - Lists all tables served from a specified instance. - - Returns: - Callable[[~.ListTablesRequest], - ~.ListTablesResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "list_tables" not in self._stubs: - self._stubs["list_tables"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/ListTables", - request_serializer=bigtable_table_admin.ListTablesRequest.serialize, - response_deserializer=bigtable_table_admin.ListTablesResponse.deserialize, - ) - return self._stubs["list_tables"] - - @property - def get_table( - self, - ) -> Callable[[bigtable_table_admin.GetTableRequest], table.Table]: - r"""Return a callable for the get table method over gRPC. - - Gets metadata information about the specified table. - - Returns: - Callable[[~.GetTableRequest], - ~.Table]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "get_table" not in self._stubs: - self._stubs["get_table"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/GetTable", - request_serializer=bigtable_table_admin.GetTableRequest.serialize, - response_deserializer=table.Table.deserialize, - ) - return self._stubs["get_table"] - - @property - def update_table( - self, - ) -> Callable[[bigtable_table_admin.UpdateTableRequest], operations_pb2.Operation]: - r"""Return a callable for the update table method over gRPC. - - Updates a specified table. - - Returns: - Callable[[~.UpdateTableRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
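Each stub property above follows the same memoization pattern: build the unary-unary callable on first access, cache it in `self._stubs`, and return the cached callable afterwards. A self-contained sketch with `FakeChannel` standing in for the logged gRPC channel:

# Sketch of the lazy stub-caching pattern used by the stub properties above.
from typing import Callable, Dict


class FakeChannel:
    def unary_unary(self, method: str) -> Callable[[str], str]:
        print(f"creating stub for {method}")
        return lambda request: f"{method} <- {request}"


class ToyTransport:
    def __init__(self) -> None:
        self._channel = FakeChannel()
        self._stubs: Dict[str, Callable[[str], str]] = {}

    @property
    def get_table(self) -> Callable[[str], str]:
        if "get_table" not in self._stubs:
            self._stubs["get_table"] = self._channel.unary_unary(
                "/google.bigtable.admin.v2.BigtableTableAdmin/GetTable"
            )
        return self._stubs["get_table"]


t = ToyTransport()
t.get_table("projects/p/instances/i/tables/t")   # stub created here
t.get_table("projects/p/instances/i/tables/t2")  # cached stub reused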
- if "update_table" not in self._stubs: - self._stubs["update_table"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateTable", - request_serializer=bigtable_table_admin.UpdateTableRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs["update_table"] - - @property - def delete_table( - self, - ) -> Callable[[bigtable_table_admin.DeleteTableRequest], empty_pb2.Empty]: - r"""Return a callable for the delete table method over gRPC. - - Permanently deletes a specified table and all of its - data. - - Returns: - Callable[[~.DeleteTableRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "delete_table" not in self._stubs: - self._stubs["delete_table"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable", - request_serializer=bigtable_table_admin.DeleteTableRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs["delete_table"] - - @property - def undelete_table( - self, - ) -> Callable[ - [bigtable_table_admin.UndeleteTableRequest], operations_pb2.Operation - ]: - r"""Return a callable for the undelete table method over gRPC. - - Restores a specified table which was accidentally - deleted. - - Returns: - Callable[[~.UndeleteTableRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "undelete_table" not in self._stubs: - self._stubs["undelete_table"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/UndeleteTable", - request_serializer=bigtable_table_admin.UndeleteTableRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs["undelete_table"] - - @property - def create_authorized_view( - self, - ) -> Callable[ - [bigtable_table_admin.CreateAuthorizedViewRequest], operations_pb2.Operation - ]: - r"""Return a callable for the create authorized view method over gRPC. - - Creates a new AuthorizedView in a table. - - Returns: - Callable[[~.CreateAuthorizedViewRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "create_authorized_view" not in self._stubs: - self._stubs["create_authorized_view"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/CreateAuthorizedView", - request_serializer=bigtable_table_admin.CreateAuthorizedViewRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs["create_authorized_view"] - - @property - def list_authorized_views( - self, - ) -> Callable[ - [bigtable_table_admin.ListAuthorizedViewsRequest], - bigtable_table_admin.ListAuthorizedViewsResponse, - ]: - r"""Return a callable for the list authorized views method over gRPC. - - Lists all AuthorizedViews from a specific table. 
- - Returns: - Callable[[~.ListAuthorizedViewsRequest], - ~.ListAuthorizedViewsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "list_authorized_views" not in self._stubs: - self._stubs["list_authorized_views"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/ListAuthorizedViews", - request_serializer=bigtable_table_admin.ListAuthorizedViewsRequest.serialize, - response_deserializer=bigtable_table_admin.ListAuthorizedViewsResponse.deserialize, - ) - return self._stubs["list_authorized_views"] - - @property - def get_authorized_view( - self, - ) -> Callable[ - [bigtable_table_admin.GetAuthorizedViewRequest], table.AuthorizedView - ]: - r"""Return a callable for the get authorized view method over gRPC. - - Gets information from a specified AuthorizedView. - - Returns: - Callable[[~.GetAuthorizedViewRequest], - ~.AuthorizedView]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "get_authorized_view" not in self._stubs: - self._stubs["get_authorized_view"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/GetAuthorizedView", - request_serializer=bigtable_table_admin.GetAuthorizedViewRequest.serialize, - response_deserializer=table.AuthorizedView.deserialize, - ) - return self._stubs["get_authorized_view"] - - @property - def update_authorized_view( - self, - ) -> Callable[ - [bigtable_table_admin.UpdateAuthorizedViewRequest], operations_pb2.Operation - ]: - r"""Return a callable for the update authorized view method over gRPC. - - Updates an AuthorizedView in a table. - - Returns: - Callable[[~.UpdateAuthorizedViewRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "update_authorized_view" not in self._stubs: - self._stubs["update_authorized_view"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateAuthorizedView", - request_serializer=bigtable_table_admin.UpdateAuthorizedViewRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs["update_authorized_view"] - - @property - def delete_authorized_view( - self, - ) -> Callable[[bigtable_table_admin.DeleteAuthorizedViewRequest], empty_pb2.Empty]: - r"""Return a callable for the delete authorized view method over gRPC. - - Permanently deletes a specified AuthorizedView. - - Returns: - Callable[[~.DeleteAuthorizedViewRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if "delete_authorized_view" not in self._stubs: - self._stubs["delete_authorized_view"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteAuthorizedView", - request_serializer=bigtable_table_admin.DeleteAuthorizedViewRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs["delete_authorized_view"] - - @property - def modify_column_families( - self, - ) -> Callable[[bigtable_table_admin.ModifyColumnFamiliesRequest], table.Table]: - r"""Return a callable for the modify column families method over gRPC. - - Performs a series of column family modifications on - the specified table. Either all or none of the - modifications will occur before this method returns, but - data requests received prior to that point may see a - table where only some modifications have taken effect. - - Returns: - Callable[[~.ModifyColumnFamiliesRequest], - ~.Table]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "modify_column_families" not in self._stubs: - self._stubs["modify_column_families"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies", - request_serializer=bigtable_table_admin.ModifyColumnFamiliesRequest.serialize, - response_deserializer=table.Table.deserialize, - ) - return self._stubs["modify_column_families"] - - @property - def drop_row_range( - self, - ) -> Callable[[bigtable_table_admin.DropRowRangeRequest], empty_pb2.Empty]: - r"""Return a callable for the drop row range method over gRPC. - - Permanently drop/delete a row range from a specified - table. The request can specify whether to delete all - rows in a table, or only those that match a particular - prefix. - - Returns: - Callable[[~.DropRowRangeRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "drop_row_range" not in self._stubs: - self._stubs["drop_row_range"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange", - request_serializer=bigtable_table_admin.DropRowRangeRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs["drop_row_range"] - - @property - def generate_consistency_token( - self, - ) -> Callable[ - [bigtable_table_admin.GenerateConsistencyTokenRequest], - bigtable_table_admin.GenerateConsistencyTokenResponse, - ]: - r"""Return a callable for the generate consistency token method over gRPC. - - Generates a consistency token for a Table, which can - be used in CheckConsistency to check whether mutations - to the table that finished before this call started have - been replicated. The tokens will be available for 90 - days. - - Returns: - Callable[[~.GenerateConsistencyTokenRequest], - ~.GenerateConsistencyTokenResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if "generate_consistency_token" not in self._stubs: - self._stubs[ - "generate_consistency_token" - ] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/GenerateConsistencyToken", - request_serializer=bigtable_table_admin.GenerateConsistencyTokenRequest.serialize, - response_deserializer=bigtable_table_admin.GenerateConsistencyTokenResponse.deserialize, - ) - return self._stubs["generate_consistency_token"] - - @property - def check_consistency( - self, - ) -> Callable[ - [bigtable_table_admin.CheckConsistencyRequest], - bigtable_table_admin.CheckConsistencyResponse, - ]: - r"""Return a callable for the check consistency method over gRPC. - - Checks replication consistency based on a consistency - token, that is, if replication has caught up based on - the conditions specified in the token and the check - request. - - Returns: - Callable[[~.CheckConsistencyRequest], - ~.CheckConsistencyResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "check_consistency" not in self._stubs: - self._stubs["check_consistency"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/CheckConsistency", - request_serializer=bigtable_table_admin.CheckConsistencyRequest.serialize, - response_deserializer=bigtable_table_admin.CheckConsistencyResponse.deserialize, - ) - return self._stubs["check_consistency"] - - @property - def snapshot_table( - self, - ) -> Callable[ - [bigtable_table_admin.SnapshotTableRequest], operations_pb2.Operation - ]: - r"""Return a callable for the snapshot table method over gRPC. - - Creates a new snapshot in the specified cluster from - the specified source table. The cluster and the table - must be in the same instance. - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - - Returns: - Callable[[~.SnapshotTableRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "snapshot_table" not in self._stubs: - self._stubs["snapshot_table"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/SnapshotTable", - request_serializer=bigtable_table_admin.SnapshotTableRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs["snapshot_table"] - - @property - def get_snapshot( - self, - ) -> Callable[[bigtable_table_admin.GetSnapshotRequest], table.Snapshot]: - r"""Return a callable for the get snapshot method over gRPC. - - Gets metadata information about the specified - snapshot. - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. 
- - Returns: - Callable[[~.GetSnapshotRequest], - ~.Snapshot]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "get_snapshot" not in self._stubs: - self._stubs["get_snapshot"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/GetSnapshot", - request_serializer=bigtable_table_admin.GetSnapshotRequest.serialize, - response_deserializer=table.Snapshot.deserialize, - ) - return self._stubs["get_snapshot"] - - @property - def list_snapshots( - self, - ) -> Callable[ - [bigtable_table_admin.ListSnapshotsRequest], - bigtable_table_admin.ListSnapshotsResponse, - ]: - r"""Return a callable for the list snapshots method over gRPC. - - Lists all snapshots associated with the specified - cluster. - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - - Returns: - Callable[[~.ListSnapshotsRequest], - ~.ListSnapshotsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "list_snapshots" not in self._stubs: - self._stubs["list_snapshots"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/ListSnapshots", - request_serializer=bigtable_table_admin.ListSnapshotsRequest.serialize, - response_deserializer=bigtable_table_admin.ListSnapshotsResponse.deserialize, - ) - return self._stubs["list_snapshots"] - - @property - def delete_snapshot( - self, - ) -> Callable[[bigtable_table_admin.DeleteSnapshotRequest], empty_pb2.Empty]: - r"""Return a callable for the delete snapshot method over gRPC. - - Permanently deletes the specified snapshot. - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - - Returns: - Callable[[~.DeleteSnapshotRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "delete_snapshot" not in self._stubs: - self._stubs["delete_snapshot"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSnapshot", - request_serializer=bigtable_table_admin.DeleteSnapshotRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs["delete_snapshot"] - - @property - def create_backup( - self, - ) -> Callable[[bigtable_table_admin.CreateBackupRequest], operations_pb2.Operation]: - r"""Return a callable for the create backup method over gRPC. - - Starts creating a new Cloud Bigtable Backup. 
The returned backup - [long-running operation][google.longrunning.Operation] can be - used to track creation of the backup. The - [metadata][google.longrunning.Operation.metadata] field type is - [CreateBackupMetadata][google.bigtable.admin.v2.CreateBackupMetadata]. - The [response][google.longrunning.Operation.response] field type - is [Backup][google.bigtable.admin.v2.Backup], if successful. - Cancelling the returned operation will stop the creation and - delete the backup. - - Returns: - Callable[[~.CreateBackupRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "create_backup" not in self._stubs: - self._stubs["create_backup"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/CreateBackup", - request_serializer=bigtable_table_admin.CreateBackupRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs["create_backup"] - - @property - def get_backup( - self, - ) -> Callable[[bigtable_table_admin.GetBackupRequest], table.Backup]: - r"""Return a callable for the get backup method over gRPC. - - Gets metadata on a pending or completed Cloud - Bigtable Backup. - - Returns: - Callable[[~.GetBackupRequest], - ~.Backup]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "get_backup" not in self._stubs: - self._stubs["get_backup"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/GetBackup", - request_serializer=bigtable_table_admin.GetBackupRequest.serialize, - response_deserializer=table.Backup.deserialize, - ) - return self._stubs["get_backup"] - - @property - def update_backup( - self, - ) -> Callable[[bigtable_table_admin.UpdateBackupRequest], table.Backup]: - r"""Return a callable for the update backup method over gRPC. - - Updates a pending or completed Cloud Bigtable Backup. - - Returns: - Callable[[~.UpdateBackupRequest], - ~.Backup]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "update_backup" not in self._stubs: - self._stubs["update_backup"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateBackup", - request_serializer=bigtable_table_admin.UpdateBackupRequest.serialize, - response_deserializer=table.Backup.deserialize, - ) - return self._stubs["update_backup"] - - @property - def delete_backup( - self, - ) -> Callable[[bigtable_table_admin.DeleteBackupRequest], empty_pb2.Empty]: - r"""Return a callable for the delete backup method over gRPC. - - Deletes a pending or completed Cloud Bigtable backup. - - Returns: - Callable[[~.DeleteBackupRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "delete_backup" not in self._stubs: - self._stubs["delete_backup"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteBackup", - request_serializer=bigtable_table_admin.DeleteBackupRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs["delete_backup"] - - @property - def list_backups( - self, - ) -> Callable[ - [bigtable_table_admin.ListBackupsRequest], - bigtable_table_admin.ListBackupsResponse, - ]: - r"""Return a callable for the list backups method over gRPC. - - Lists Cloud Bigtable backups. Returns both completed - and pending backups. - - Returns: - Callable[[~.ListBackupsRequest], - ~.ListBackupsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "list_backups" not in self._stubs: - self._stubs["list_backups"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/ListBackups", - request_serializer=bigtable_table_admin.ListBackupsRequest.serialize, - response_deserializer=bigtable_table_admin.ListBackupsResponse.deserialize, - ) - return self._stubs["list_backups"] - - @property - def restore_table( - self, - ) -> Callable[[bigtable_table_admin.RestoreTableRequest], operations_pb2.Operation]: - r"""Return a callable for the restore table method over gRPC. - - Create a new table by restoring from a completed backup. The - returned table [long-running - operation][google.longrunning.Operation] can be used to track - the progress of the operation, and to cancel it. The - [metadata][google.longrunning.Operation.metadata] field type is - [RestoreTableMetadata][google.bigtable.admin.v2.RestoreTableMetadata]. - The [response][google.longrunning.Operation.response] type is - [Table][google.bigtable.admin.v2.Table], if successful. - - Returns: - Callable[[~.RestoreTableRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "restore_table" not in self._stubs: - self._stubs["restore_table"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/RestoreTable", - request_serializer=bigtable_table_admin.RestoreTableRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs["restore_table"] - - @property - def copy_backup( - self, - ) -> Callable[[bigtable_table_admin.CopyBackupRequest], operations_pb2.Operation]: - r"""Return a callable for the copy backup method over gRPC. - - Copy a Cloud Bigtable backup to a new backup in the - destination cluster located in the destination instance - and project. - - Returns: - Callable[[~.CopyBackupRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if "copy_backup" not in self._stubs: - self._stubs["copy_backup"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/CopyBackup", - request_serializer=bigtable_table_admin.CopyBackupRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs["copy_backup"] - - @property - def get_iam_policy( - self, - ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: - r"""Return a callable for the get iam policy method over gRPC. - - Gets the access control policy for a Bigtable - resource. Returns an empty policy if the resource exists - but does not have a policy set. - - Returns: - Callable[[~.GetIamPolicyRequest], - ~.Policy]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "get_iam_policy" not in self._stubs: - self._stubs["get_iam_policy"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/GetIamPolicy", - request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, - response_deserializer=policy_pb2.Policy.FromString, - ) - return self._stubs["get_iam_policy"] - - @property - def set_iam_policy( - self, - ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: - r"""Return a callable for the set iam policy method over gRPC. - - Sets the access control policy on a Bigtable - resource. Replaces any existing policy. - - Returns: - Callable[[~.SetIamPolicyRequest], - ~.Policy]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "set_iam_policy" not in self._stubs: - self._stubs["set_iam_policy"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/SetIamPolicy", - request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, - response_deserializer=policy_pb2.Policy.FromString, - ) - return self._stubs["set_iam_policy"] - - @property - def test_iam_permissions( - self, - ) -> Callable[ - [iam_policy_pb2.TestIamPermissionsRequest], - iam_policy_pb2.TestIamPermissionsResponse, - ]: - r"""Return a callable for the test iam permissions method over gRPC. - - Returns permissions that the caller has on the - specified Bigtable resource. - - Returns: - Callable[[~.TestIamPermissionsRequest], - ~.TestIamPermissionsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if "test_iam_permissions" not in self._stubs: - self._stubs["test_iam_permissions"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/TestIamPermissions", - request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, - response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, - ) - return self._stubs["test_iam_permissions"] - - @property - def create_schema_bundle( - self, - ) -> Callable[ - [bigtable_table_admin.CreateSchemaBundleRequest], operations_pb2.Operation - ]: - r"""Return a callable for the create schema bundle method over gRPC. - - Creates a new schema bundle in the specified table. - - Returns: - Callable[[~.CreateSchemaBundleRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "create_schema_bundle" not in self._stubs: - self._stubs["create_schema_bundle"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/CreateSchemaBundle", - request_serializer=bigtable_table_admin.CreateSchemaBundleRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs["create_schema_bundle"] - - @property - def update_schema_bundle( - self, - ) -> Callable[ - [bigtable_table_admin.UpdateSchemaBundleRequest], operations_pb2.Operation - ]: - r"""Return a callable for the update schema bundle method over gRPC. - - Updates a schema bundle in the specified table. - - Returns: - Callable[[~.UpdateSchemaBundleRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "update_schema_bundle" not in self._stubs: - self._stubs["update_schema_bundle"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateSchemaBundle", - request_serializer=bigtable_table_admin.UpdateSchemaBundleRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs["update_schema_bundle"] - - @property - def get_schema_bundle( - self, - ) -> Callable[[bigtable_table_admin.GetSchemaBundleRequest], table.SchemaBundle]: - r"""Return a callable for the get schema bundle method over gRPC. - - Gets metadata information about the specified schema - bundle. - - Returns: - Callable[[~.GetSchemaBundleRequest], - ~.SchemaBundle]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if "get_schema_bundle" not in self._stubs: - self._stubs["get_schema_bundle"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/GetSchemaBundle", - request_serializer=bigtable_table_admin.GetSchemaBundleRequest.serialize, - response_deserializer=table.SchemaBundle.deserialize, - ) - return self._stubs["get_schema_bundle"] - - @property - def list_schema_bundles( - self, - ) -> Callable[ - [bigtable_table_admin.ListSchemaBundlesRequest], - bigtable_table_admin.ListSchemaBundlesResponse, - ]: - r"""Return a callable for the list schema bundles method over gRPC. - - Lists all schema bundles associated with the - specified table. - - Returns: - Callable[[~.ListSchemaBundlesRequest], - ~.ListSchemaBundlesResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "list_schema_bundles" not in self._stubs: - self._stubs["list_schema_bundles"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/ListSchemaBundles", - request_serializer=bigtable_table_admin.ListSchemaBundlesRequest.serialize, - response_deserializer=bigtable_table_admin.ListSchemaBundlesResponse.deserialize, - ) - return self._stubs["list_schema_bundles"] - - @property - def delete_schema_bundle( - self, - ) -> Callable[[bigtable_table_admin.DeleteSchemaBundleRequest], empty_pb2.Empty]: - r"""Return a callable for the delete schema bundle method over gRPC. - - Deletes a schema bundle in the specified table. - - Returns: - Callable[[~.DeleteSchemaBundleRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "delete_schema_bundle" not in self._stubs: - self._stubs["delete_schema_bundle"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSchemaBundle", - request_serializer=bigtable_table_admin.DeleteSchemaBundleRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs["delete_schema_bundle"] - - def close(self): - self._logged_channel.close() - - @property - def kind(self) -> str: - return "grpc" - - -__all__ = ("BigtableTableAdminGrpcTransport",) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py deleted file mode 100644 index 159a96eda..000000000 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py +++ /dev/null @@ -1,1719 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import inspect -import json -import pickle -import logging as std_logging -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers_async -from google.api_core import exceptions as core_exceptions -from google.api_core import retry_async as retries -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.protobuf.json_format import MessageToJson -import google.protobuf.message - -import grpc # type: ignore -import proto # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.bigtable_admin_v2.types import bigtable_table_admin -from google.cloud.bigtable_admin_v2.types import table -from google.cloud.bigtable_admin_v2.types import table as gba_table -from google.iam.v1 import iam_policy_pb2 # type: ignore -from google.iam.v1 import policy_pb2 # type: ignore -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from .base import BigtableTableAdminTransport, DEFAULT_CLIENT_INFO -from .grpc import BigtableTableAdminGrpcTransport - -try: - from google.api_core import client_logging # type: ignore - - CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER -except ImportError: # pragma: NO COVER - CLIENT_LOGGING_SUPPORTED = False - -_LOGGER = std_logging.getLogger(__name__) - - -class _LoggingClientAIOInterceptor( - grpc.aio.UnaryUnaryClientInterceptor -): # pragma: NO COVER - async def intercept_unary_unary(self, continuation, client_call_details, request): - logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - std_logging.DEBUG - ) - if logging_enabled: # pragma: NO COVER - request_metadata = client_call_details.metadata - if isinstance(request, proto.Message): - request_payload = type(request).to_json(request) - elif isinstance(request, google.protobuf.message.Message): - request_payload = MessageToJson(request) - else: - request_payload = f"{type(request).__name__}: {pickle.dumps(request)}" - - request_metadata = { - key: value.decode("utf-8") if isinstance(value, bytes) else value - for key, value in request_metadata - } - grpc_request = { - "payload": request_payload, - "requestMethod": "grpc", - "metadata": dict(request_metadata), - } - _LOGGER.debug( - f"Sending request for {client_call_details.method}", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": str(client_call_details.method), - "request": grpc_request, - "metadata": grpc_request["metadata"], - }, - ) - response = await continuation(client_call_details, request) - if logging_enabled: # pragma: NO COVER - response_metadata = await response.trailing_metadata() - # Convert gRPC metadata `` to list of tuples - metadata = ( - dict([(k, str(v)) for k, v in response_metadata]) - if response_metadata - else None - ) - result = await response - if isinstance(result, proto.Message): - response_payload = type(result).to_json(result) - elif isinstance(result, google.protobuf.message.Message): - response_payload = MessageToJson(result) - else: - response_payload = f"{type(result).__name__}: {pickle.dumps(result)}" - grpc_response = { - "payload": response_payload, - "metadata": metadata, - "status": "OK", - } - _LOGGER.debug( - f"Received response to rpc {client_call_details.method}.", - extra={ - "serviceName": 
"google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": str(client_call_details.method), - "response": grpc_response, - "metadata": grpc_response["metadata"], - }, - ) - return response - - -class BigtableTableAdminGrpcAsyncIOTransport(BigtableTableAdminTransport): - """gRPC AsyncIO backend transport for BigtableTableAdmin. - - Service for creating, configuring, and deleting Cloud - Bigtable tables. - - Provides access to the table schemas only, not the data stored - within the tables. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel( - cls, - host: str = "bigtableadmin.googleapis.com", - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs, - ) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. - """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs, - ) - - def __init__( - self, - *, - host: str = "bigtableadmin.googleapis.com", - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: Optional[Union[aio.Channel, Callable[..., aio.Channel]]] = None, - api_mtls_endpoint: Optional[str] = None, - client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, - ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, - client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - api_audience: Optional[str] = None, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to (default: 'bigtableadmin.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if a ``channel`` instance is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[Union[aio.Channel, Callable[..., aio.Channel]]]): - A ``Channel`` instance through which to make calls, or a Callable - that constructs and returns one. If set to None, ``self.create_channel`` - is used to create the channel. If a Callable is given, it will be called - with the same arguments as used in ``self.create_channel``. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if a ``channel`` instance is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if isinstance(channel, aio.Channel): - # Ignore credentials if a channel was passed. - credentials = None - self._ignore_credentials = True - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. 
- if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - api_audience=api_audience, - ) - - if not self._grpc_channel: - # initialize with the provided callable or the default channel - channel_init = channel or type(self).create_channel - self._grpc_channel = channel_init( - self._host, - # use the credentials which are saved - credentials=self._credentials, - # Set ``credentials_file`` to ``None`` here as - # the credentials that we saved earlier should be used. - credentials_file=None, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - self._interceptor = _LoggingClientAIOInterceptor() - self._grpc_channel._unary_unary_interceptors.append(self._interceptor) - self._logged_channel = self._grpc_channel - self._wrap_with_kind = ( - "kind" in inspect.signature(gapic_v1.method_async.wrap_method).parameters - ) - # Wrap messages. This must be done after self._logged_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Quick check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self._logged_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_table( - self, - ) -> Callable[ - [bigtable_table_admin.CreateTableRequest], Awaitable[gba_table.Table] - ]: - r"""Return a callable for the create table method over gRPC. - - Creates a new table in the specified instance. - The table can be created with a full set of initial - column families, specified in the request. - - Returns: - Callable[[~.CreateTableRequest], - Awaitable[~.Table]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if "create_table" not in self._stubs: - self._stubs["create_table"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable", - request_serializer=bigtable_table_admin.CreateTableRequest.serialize, - response_deserializer=gba_table.Table.deserialize, - ) - return self._stubs["create_table"] - - @property - def create_table_from_snapshot( - self, - ) -> Callable[ - [bigtable_table_admin.CreateTableFromSnapshotRequest], - Awaitable[operations_pb2.Operation], - ]: - r"""Return a callable for the create table from snapshot method over gRPC. - - Creates a new table from the specified snapshot. The - target table must not exist. The snapshot and the table - must be in the same instance. - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - - Returns: - Callable[[~.CreateTableFromSnapshotRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "create_table_from_snapshot" not in self._stubs: - self._stubs[ - "create_table_from_snapshot" - ] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTableFromSnapshot", - request_serializer=bigtable_table_admin.CreateTableFromSnapshotRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs["create_table_from_snapshot"] - - @property - def list_tables( - self, - ) -> Callable[ - [bigtable_table_admin.ListTablesRequest], - Awaitable[bigtable_table_admin.ListTablesResponse], - ]: - r"""Return a callable for the list tables method over gRPC. - - Lists all tables served from a specified instance. - - Returns: - Callable[[~.ListTablesRequest], - Awaitable[~.ListTablesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "list_tables" not in self._stubs: - self._stubs["list_tables"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/ListTables", - request_serializer=bigtable_table_admin.ListTablesRequest.serialize, - response_deserializer=bigtable_table_admin.ListTablesResponse.deserialize, - ) - return self._stubs["list_tables"] - - @property - def get_table( - self, - ) -> Callable[[bigtable_table_admin.GetTableRequest], Awaitable[table.Table]]: - r"""Return a callable for the get table method over gRPC. - - Gets metadata information about the specified table. - - Returns: - Callable[[~.GetTableRequest], - Awaitable[~.Table]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if "get_table" not in self._stubs: - self._stubs["get_table"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/GetTable", - request_serializer=bigtable_table_admin.GetTableRequest.serialize, - response_deserializer=table.Table.deserialize, - ) - return self._stubs["get_table"] - - @property - def update_table( - self, - ) -> Callable[ - [bigtable_table_admin.UpdateTableRequest], Awaitable[operations_pb2.Operation] - ]: - r"""Return a callable for the update table method over gRPC. - - Updates a specified table. - - Returns: - Callable[[~.UpdateTableRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "update_table" not in self._stubs: - self._stubs["update_table"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateTable", - request_serializer=bigtable_table_admin.UpdateTableRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs["update_table"] - - @property - def delete_table( - self, - ) -> Callable[ - [bigtable_table_admin.DeleteTableRequest], Awaitable[empty_pb2.Empty] - ]: - r"""Return a callable for the delete table method over gRPC. - - Permanently deletes a specified table and all of its - data. - - Returns: - Callable[[~.DeleteTableRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "delete_table" not in self._stubs: - self._stubs["delete_table"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable", - request_serializer=bigtable_table_admin.DeleteTableRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs["delete_table"] - - @property - def undelete_table( - self, - ) -> Callable[ - [bigtable_table_admin.UndeleteTableRequest], Awaitable[operations_pb2.Operation] - ]: - r"""Return a callable for the undelete table method over gRPC. - - Restores a specified table which was accidentally - deleted. - - Returns: - Callable[[~.UndeleteTableRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "undelete_table" not in self._stubs: - self._stubs["undelete_table"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/UndeleteTable", - request_serializer=bigtable_table_admin.UndeleteTableRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs["undelete_table"] - - @property - def create_authorized_view( - self, - ) -> Callable[ - [bigtable_table_admin.CreateAuthorizedViewRequest], - Awaitable[operations_pb2.Operation], - ]: - r"""Return a callable for the create authorized view method over gRPC. - - Creates a new AuthorizedView in a table. 
- - Returns: - Callable[[~.CreateAuthorizedViewRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "create_authorized_view" not in self._stubs: - self._stubs["create_authorized_view"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/CreateAuthorizedView", - request_serializer=bigtable_table_admin.CreateAuthorizedViewRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs["create_authorized_view"] - - @property - def list_authorized_views( - self, - ) -> Callable[ - [bigtable_table_admin.ListAuthorizedViewsRequest], - Awaitable[bigtable_table_admin.ListAuthorizedViewsResponse], - ]: - r"""Return a callable for the list authorized views method over gRPC. - - Lists all AuthorizedViews from a specific table. - - Returns: - Callable[[~.ListAuthorizedViewsRequest], - Awaitable[~.ListAuthorizedViewsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "list_authorized_views" not in self._stubs: - self._stubs["list_authorized_views"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/ListAuthorizedViews", - request_serializer=bigtable_table_admin.ListAuthorizedViewsRequest.serialize, - response_deserializer=bigtable_table_admin.ListAuthorizedViewsResponse.deserialize, - ) - return self._stubs["list_authorized_views"] - - @property - def get_authorized_view( - self, - ) -> Callable[ - [bigtable_table_admin.GetAuthorizedViewRequest], Awaitable[table.AuthorizedView] - ]: - r"""Return a callable for the get authorized view method over gRPC. - - Gets information from a specified AuthorizedView. - - Returns: - Callable[[~.GetAuthorizedViewRequest], - Awaitable[~.AuthorizedView]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "get_authorized_view" not in self._stubs: - self._stubs["get_authorized_view"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/GetAuthorizedView", - request_serializer=bigtable_table_admin.GetAuthorizedViewRequest.serialize, - response_deserializer=table.AuthorizedView.deserialize, - ) - return self._stubs["get_authorized_view"] - - @property - def update_authorized_view( - self, - ) -> Callable[ - [bigtable_table_admin.UpdateAuthorizedViewRequest], - Awaitable[operations_pb2.Operation], - ]: - r"""Return a callable for the update authorized view method over gRPC. - - Updates an AuthorizedView in a table. - - Returns: - Callable[[~.UpdateAuthorizedViewRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if "update_authorized_view" not in self._stubs: - self._stubs["update_authorized_view"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateAuthorizedView", - request_serializer=bigtable_table_admin.UpdateAuthorizedViewRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs["update_authorized_view"] - - @property - def delete_authorized_view( - self, - ) -> Callable[ - [bigtable_table_admin.DeleteAuthorizedViewRequest], Awaitable[empty_pb2.Empty] - ]: - r"""Return a callable for the delete authorized view method over gRPC. - - Permanently deletes a specified AuthorizedView. - - Returns: - Callable[[~.DeleteAuthorizedViewRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "delete_authorized_view" not in self._stubs: - self._stubs["delete_authorized_view"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteAuthorizedView", - request_serializer=bigtable_table_admin.DeleteAuthorizedViewRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs["delete_authorized_view"] - - @property - def modify_column_families( - self, - ) -> Callable[ - [bigtable_table_admin.ModifyColumnFamiliesRequest], Awaitable[table.Table] - ]: - r"""Return a callable for the modify column families method over gRPC. - - Performs a series of column family modifications on - the specified table. Either all or none of the - modifications will occur before this method returns, but - data requests received prior to that point may see a - table where only some modifications have taken effect. - - Returns: - Callable[[~.ModifyColumnFamiliesRequest], - Awaitable[~.Table]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "modify_column_families" not in self._stubs: - self._stubs["modify_column_families"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies", - request_serializer=bigtable_table_admin.ModifyColumnFamiliesRequest.serialize, - response_deserializer=table.Table.deserialize, - ) - return self._stubs["modify_column_families"] - - @property - def drop_row_range( - self, - ) -> Callable[ - [bigtable_table_admin.DropRowRangeRequest], Awaitable[empty_pb2.Empty] - ]: - r"""Return a callable for the drop row range method over gRPC. - - Permanently drop/delete a row range from a specified - table. The request can specify whether to delete all - rows in a table, or only those that match a particular - prefix. - - Returns: - Callable[[~.DropRowRangeRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if "drop_row_range" not in self._stubs: - self._stubs["drop_row_range"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange", - request_serializer=bigtable_table_admin.DropRowRangeRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs["drop_row_range"] - - @property - def generate_consistency_token( - self, - ) -> Callable[ - [bigtable_table_admin.GenerateConsistencyTokenRequest], - Awaitable[bigtable_table_admin.GenerateConsistencyTokenResponse], - ]: - r"""Return a callable for the generate consistency token method over gRPC. - - Generates a consistency token for a Table, which can - be used in CheckConsistency to check whether mutations - to the table that finished before this call started have - been replicated. The tokens will be available for 90 - days. - - Returns: - Callable[[~.GenerateConsistencyTokenRequest], - Awaitable[~.GenerateConsistencyTokenResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "generate_consistency_token" not in self._stubs: - self._stubs[ - "generate_consistency_token" - ] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/GenerateConsistencyToken", - request_serializer=bigtable_table_admin.GenerateConsistencyTokenRequest.serialize, - response_deserializer=bigtable_table_admin.GenerateConsistencyTokenResponse.deserialize, - ) - return self._stubs["generate_consistency_token"] - - @property - def check_consistency( - self, - ) -> Callable[ - [bigtable_table_admin.CheckConsistencyRequest], - Awaitable[bigtable_table_admin.CheckConsistencyResponse], - ]: - r"""Return a callable for the check consistency method over gRPC. - - Checks replication consistency based on a consistency - token, that is, if replication has caught up based on - the conditions specified in the token and the check - request. - - Returns: - Callable[[~.CheckConsistencyRequest], - Awaitable[~.CheckConsistencyResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "check_consistency" not in self._stubs: - self._stubs["check_consistency"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/CheckConsistency", - request_serializer=bigtable_table_admin.CheckConsistencyRequest.serialize, - response_deserializer=bigtable_table_admin.CheckConsistencyResponse.deserialize, - ) - return self._stubs["check_consistency"] - - @property - def snapshot_table( - self, - ) -> Callable[ - [bigtable_table_admin.SnapshotTableRequest], Awaitable[operations_pb2.Operation] - ]: - r"""Return a callable for the snapshot table method over gRPC. - - Creates a new snapshot in the specified cluster from - the specified source table. The cluster and the table - must be in the same instance. - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. 
- - Returns: - Callable[[~.SnapshotTableRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "snapshot_table" not in self._stubs: - self._stubs["snapshot_table"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/SnapshotTable", - request_serializer=bigtable_table_admin.SnapshotTableRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs["snapshot_table"] - - @property - def get_snapshot( - self, - ) -> Callable[[bigtable_table_admin.GetSnapshotRequest], Awaitable[table.Snapshot]]: - r"""Return a callable for the get snapshot method over gRPC. - - Gets metadata information about the specified - snapshot. - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - - Returns: - Callable[[~.GetSnapshotRequest], - Awaitable[~.Snapshot]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "get_snapshot" not in self._stubs: - self._stubs["get_snapshot"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/GetSnapshot", - request_serializer=bigtable_table_admin.GetSnapshotRequest.serialize, - response_deserializer=table.Snapshot.deserialize, - ) - return self._stubs["get_snapshot"] - - @property - def list_snapshots( - self, - ) -> Callable[ - [bigtable_table_admin.ListSnapshotsRequest], - Awaitable[bigtable_table_admin.ListSnapshotsResponse], - ]: - r"""Return a callable for the list snapshots method over gRPC. - - Lists all snapshots associated with the specified - cluster. - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - - Returns: - Callable[[~.ListSnapshotsRequest], - Awaitable[~.ListSnapshotsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "list_snapshots" not in self._stubs: - self._stubs["list_snapshots"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/ListSnapshots", - request_serializer=bigtable_table_admin.ListSnapshotsRequest.serialize, - response_deserializer=bigtable_table_admin.ListSnapshotsResponse.deserialize, - ) - return self._stubs["list_snapshots"] - - @property - def delete_snapshot( - self, - ) -> Callable[ - [bigtable_table_admin.DeleteSnapshotRequest], Awaitable[empty_pb2.Empty] - ]: - r"""Return a callable for the delete snapshot method over gRPC. - - Permanently deletes the specified snapshot. 
- - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - - Returns: - Callable[[~.DeleteSnapshotRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "delete_snapshot" not in self._stubs: - self._stubs["delete_snapshot"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSnapshot", - request_serializer=bigtable_table_admin.DeleteSnapshotRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs["delete_snapshot"] - - @property - def create_backup( - self, - ) -> Callable[ - [bigtable_table_admin.CreateBackupRequest], Awaitable[operations_pb2.Operation] - ]: - r"""Return a callable for the create backup method over gRPC. - - Starts creating a new Cloud Bigtable Backup. The returned backup - [long-running operation][google.longrunning.Operation] can be - used to track creation of the backup. The - [metadata][google.longrunning.Operation.metadata] field type is - [CreateBackupMetadata][google.bigtable.admin.v2.CreateBackupMetadata]. - The [response][google.longrunning.Operation.response] field type - is [Backup][google.bigtable.admin.v2.Backup], if successful. - Cancelling the returned operation will stop the creation and - delete the backup. - - Returns: - Callable[[~.CreateBackupRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "create_backup" not in self._stubs: - self._stubs["create_backup"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/CreateBackup", - request_serializer=bigtable_table_admin.CreateBackupRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs["create_backup"] - - @property - def get_backup( - self, - ) -> Callable[[bigtable_table_admin.GetBackupRequest], Awaitable[table.Backup]]: - r"""Return a callable for the get backup method over gRPC. - - Gets metadata on a pending or completed Cloud - Bigtable Backup. - - Returns: - Callable[[~.GetBackupRequest], - Awaitable[~.Backup]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if "get_backup" not in self._stubs: - self._stubs["get_backup"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/GetBackup", - request_serializer=bigtable_table_admin.GetBackupRequest.serialize, - response_deserializer=table.Backup.deserialize, - ) - return self._stubs["get_backup"] - - @property - def update_backup( - self, - ) -> Callable[[bigtable_table_admin.UpdateBackupRequest], Awaitable[table.Backup]]: - r"""Return a callable for the update backup method over gRPC. - - Updates a pending or completed Cloud Bigtable Backup. - - Returns: - Callable[[~.UpdateBackupRequest], - Awaitable[~.Backup]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "update_backup" not in self._stubs: - self._stubs["update_backup"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateBackup", - request_serializer=bigtable_table_admin.UpdateBackupRequest.serialize, - response_deserializer=table.Backup.deserialize, - ) - return self._stubs["update_backup"] - - @property - def delete_backup( - self, - ) -> Callable[ - [bigtable_table_admin.DeleteBackupRequest], Awaitable[empty_pb2.Empty] - ]: - r"""Return a callable for the delete backup method over gRPC. - - Deletes a pending or completed Cloud Bigtable backup. - - Returns: - Callable[[~.DeleteBackupRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "delete_backup" not in self._stubs: - self._stubs["delete_backup"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteBackup", - request_serializer=bigtable_table_admin.DeleteBackupRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs["delete_backup"] - - @property - def list_backups( - self, - ) -> Callable[ - [bigtable_table_admin.ListBackupsRequest], - Awaitable[bigtable_table_admin.ListBackupsResponse], - ]: - r"""Return a callable for the list backups method over gRPC. - - Lists Cloud Bigtable backups. Returns both completed - and pending backups. - - Returns: - Callable[[~.ListBackupsRequest], - Awaitable[~.ListBackupsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "list_backups" not in self._stubs: - self._stubs["list_backups"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/ListBackups", - request_serializer=bigtable_table_admin.ListBackupsRequest.serialize, - response_deserializer=bigtable_table_admin.ListBackupsResponse.deserialize, - ) - return self._stubs["list_backups"] - - @property - def restore_table( - self, - ) -> Callable[ - [bigtable_table_admin.RestoreTableRequest], Awaitable[operations_pb2.Operation] - ]: - r"""Return a callable for the restore table method over gRPC. - - Create a new table by restoring from a completed backup. 
The - returned table [long-running - operation][google.longrunning.Operation] can be used to track - the progress of the operation, and to cancel it. The - [metadata][google.longrunning.Operation.metadata] field type is - [RestoreTableMetadata][google.bigtable.admin.v2.RestoreTableMetadata]. - The [response][google.longrunning.Operation.response] type is - [Table][google.bigtable.admin.v2.Table], if successful. - - Returns: - Callable[[~.RestoreTableRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "restore_table" not in self._stubs: - self._stubs["restore_table"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/RestoreTable", - request_serializer=bigtable_table_admin.RestoreTableRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs["restore_table"] - - @property - def copy_backup( - self, - ) -> Callable[ - [bigtable_table_admin.CopyBackupRequest], Awaitable[operations_pb2.Operation] - ]: - r"""Return a callable for the copy backup method over gRPC. - - Copy a Cloud Bigtable backup to a new backup in the - destination cluster located in the destination instance - and project. - - Returns: - Callable[[~.CopyBackupRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "copy_backup" not in self._stubs: - self._stubs["copy_backup"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/CopyBackup", - request_serializer=bigtable_table_admin.CopyBackupRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs["copy_backup"] - - @property - def get_iam_policy( - self, - ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], Awaitable[policy_pb2.Policy]]: - r"""Return a callable for the get iam policy method over gRPC. - - Gets the access control policy for a Bigtable - resource. Returns an empty policy if the resource exists - but does not have a policy set. - - Returns: - Callable[[~.GetIamPolicyRequest], - Awaitable[~.Policy]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "get_iam_policy" not in self._stubs: - self._stubs["get_iam_policy"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/GetIamPolicy", - request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, - response_deserializer=policy_pb2.Policy.FromString, - ) - return self._stubs["get_iam_policy"] - - @property - def set_iam_policy( - self, - ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], Awaitable[policy_pb2.Policy]]: - r"""Return a callable for the set iam policy method over gRPC. - - Sets the access control policy on a Bigtable - resource. Replaces any existing policy. 
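The ``create_backup`` and ``restore_table`` docstrings above spell out the long-running-operation flow: ``CreateBackupMetadata`` with a ``Backup`` response, then ``RestoreTableMetadata`` with a ``Table`` response when restoring. A minimal end-to-end sketch of that flow with the async admin client; the ``backup_and_restore`` helper, the example backup and table IDs, and the seven-day expiry are assumptions made for illustration:

from datetime import datetime, timedelta, timezone

from google.cloud import bigtable_admin_v2
from google.cloud.bigtable_admin_v2.types import bigtable_table_admin, table


async def backup_and_restore(cluster: str, instance: str, source_table: str) -> None:
    """Back up a table to a cluster, then restore it into a new table."""
    client = bigtable_admin_v2.BigtableTableAdminAsyncClient()

    # CreateBackup returns a long-running operation whose response is a Backup.
    create_op = await client.create_backup(
        request=bigtable_table_admin.CreateBackupRequest(
            parent=cluster,  # "projects/<p>/instances/<i>/clusters/<c>"
            backup_id="example-backup",
            backup=table.Backup(
                source_table=source_table,
                expire_time=datetime.now(timezone.utc) + timedelta(days=7),
            ),
        )
    )
    backup = await create_op.result()

    # RestoreTable returns a long-running operation whose response is a Table.
    restore_op = await client.restore_table(
        request=bigtable_table_admin.RestoreTableRequest(
            parent=instance,  # "projects/<p>/instances/<i>"
            table_id="restored-table",
            backup=backup.name,
        )
    )
    await restore_op.result()

Cancelling ``create_op`` before it completes stops the backup creation and deletes the partial backup, as the docstring above notes.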
- - Returns: - Callable[[~.SetIamPolicyRequest], - Awaitable[~.Policy]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "set_iam_policy" not in self._stubs: - self._stubs["set_iam_policy"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/SetIamPolicy", - request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, - response_deserializer=policy_pb2.Policy.FromString, - ) - return self._stubs["set_iam_policy"] - - @property - def test_iam_permissions( - self, - ) -> Callable[ - [iam_policy_pb2.TestIamPermissionsRequest], - Awaitable[iam_policy_pb2.TestIamPermissionsResponse], - ]: - r"""Return a callable for the test iam permissions method over gRPC. - - Returns permissions that the caller has on the - specified Bigtable resource. - - Returns: - Callable[[~.TestIamPermissionsRequest], - Awaitable[~.TestIamPermissionsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "test_iam_permissions" not in self._stubs: - self._stubs["test_iam_permissions"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/TestIamPermissions", - request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, - response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, - ) - return self._stubs["test_iam_permissions"] - - @property - def create_schema_bundle( - self, - ) -> Callable[ - [bigtable_table_admin.CreateSchemaBundleRequest], - Awaitable[operations_pb2.Operation], - ]: - r"""Return a callable for the create schema bundle method over gRPC. - - Creates a new schema bundle in the specified table. - - Returns: - Callable[[~.CreateSchemaBundleRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "create_schema_bundle" not in self._stubs: - self._stubs["create_schema_bundle"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/CreateSchemaBundle", - request_serializer=bigtable_table_admin.CreateSchemaBundleRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs["create_schema_bundle"] - - @property - def update_schema_bundle( - self, - ) -> Callable[ - [bigtable_table_admin.UpdateSchemaBundleRequest], - Awaitable[operations_pb2.Operation], - ]: - r"""Return a callable for the update schema bundle method over gRPC. - - Updates a schema bundle in the specified table. - - Returns: - Callable[[~.UpdateSchemaBundleRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if "update_schema_bundle" not in self._stubs: - self._stubs["update_schema_bundle"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateSchemaBundle", - request_serializer=bigtable_table_admin.UpdateSchemaBundleRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs["update_schema_bundle"] - - @property - def get_schema_bundle( - self, - ) -> Callable[ - [bigtable_table_admin.GetSchemaBundleRequest], Awaitable[table.SchemaBundle] - ]: - r"""Return a callable for the get schema bundle method over gRPC. - - Gets metadata information about the specified schema - bundle. - - Returns: - Callable[[~.GetSchemaBundleRequest], - Awaitable[~.SchemaBundle]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "get_schema_bundle" not in self._stubs: - self._stubs["get_schema_bundle"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/GetSchemaBundle", - request_serializer=bigtable_table_admin.GetSchemaBundleRequest.serialize, - response_deserializer=table.SchemaBundle.deserialize, - ) - return self._stubs["get_schema_bundle"] - - @property - def list_schema_bundles( - self, - ) -> Callable[ - [bigtable_table_admin.ListSchemaBundlesRequest], - Awaitable[bigtable_table_admin.ListSchemaBundlesResponse], - ]: - r"""Return a callable for the list schema bundles method over gRPC. - - Lists all schema bundles associated with the - specified table. - - Returns: - Callable[[~.ListSchemaBundlesRequest], - Awaitable[~.ListSchemaBundlesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if "list_schema_bundles" not in self._stubs: - self._stubs["list_schema_bundles"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/ListSchemaBundles", - request_serializer=bigtable_table_admin.ListSchemaBundlesRequest.serialize, - response_deserializer=bigtable_table_admin.ListSchemaBundlesResponse.deserialize, - ) - return self._stubs["list_schema_bundles"] - - @property - def delete_schema_bundle( - self, - ) -> Callable[ - [bigtable_table_admin.DeleteSchemaBundleRequest], Awaitable[empty_pb2.Empty] - ]: - r"""Return a callable for the delete schema bundle method over gRPC. - - Deletes a schema bundle in the specified table. - - Returns: - Callable[[~.DeleteSchemaBundleRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if "delete_schema_bundle" not in self._stubs: - self._stubs["delete_schema_bundle"] = self._logged_channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSchemaBundle", - request_serializer=bigtable_table_admin.DeleteSchemaBundleRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs["delete_schema_bundle"] - - def _prep_wrapped_messages(self, client_info): - """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" - self._wrapped_methods = { - self.create_table: self._wrap_method( - self.create_table, - default_timeout=300.0, - client_info=client_info, - ), - self.create_table_from_snapshot: self._wrap_method( - self.create_table_from_snapshot, - default_timeout=None, - client_info=client_info, - ), - self.list_tables: self._wrap_method( - self.list_tables, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.get_table: self._wrap_method( - self.get_table, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.update_table: self._wrap_method( - self.update_table, - default_timeout=None, - client_info=client_info, - ), - self.delete_table: self._wrap_method( - self.delete_table, - default_timeout=300.0, - client_info=client_info, - ), - self.undelete_table: self._wrap_method( - self.undelete_table, - default_timeout=None, - client_info=client_info, - ), - self.create_authorized_view: self._wrap_method( - self.create_authorized_view, - default_timeout=None, - client_info=client_info, - ), - self.list_authorized_views: self._wrap_method( - self.list_authorized_views, - default_timeout=None, - client_info=client_info, - ), - self.get_authorized_view: self._wrap_method( - self.get_authorized_view, - default_timeout=None, - client_info=client_info, - ), - self.update_authorized_view: self._wrap_method( - self.update_authorized_view, - default_timeout=None, - client_info=client_info, - ), - self.delete_authorized_view: self._wrap_method( - self.delete_authorized_view, - default_timeout=None, - client_info=client_info, - ), - self.modify_column_families: self._wrap_method( - self.modify_column_families, - default_timeout=300.0, - client_info=client_info, - ), - self.drop_row_range: self._wrap_method( - self.drop_row_range, - default_timeout=3600.0, - client_info=client_info, - ), - self.generate_consistency_token: self._wrap_method( - self.generate_consistency_token, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.check_consistency: self._wrap_method( - self.check_consistency, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=3600.0, - ), - default_timeout=3600.0, - client_info=client_info, - ), - self.snapshot_table: self._wrap_method( - 
self.snapshot_table, - default_timeout=None, - client_info=client_info, - ), - self.get_snapshot: self._wrap_method( - self.get_snapshot, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.list_snapshots: self._wrap_method( - self.list_snapshots, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.delete_snapshot: self._wrap_method( - self.delete_snapshot, - default_timeout=300.0, - client_info=client_info, - ), - self.create_backup: self._wrap_method( - self.create_backup, - default_timeout=60.0, - client_info=client_info, - ), - self.get_backup: self._wrap_method( - self.get_backup, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.update_backup: self._wrap_method( - self.update_backup, - default_timeout=60.0, - client_info=client_info, - ), - self.delete_backup: self._wrap_method( - self.delete_backup, - default_timeout=300.0, - client_info=client_info, - ), - self.list_backups: self._wrap_method( - self.list_backups, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.restore_table: self._wrap_method( - self.restore_table, - default_timeout=60.0, - client_info=client_info, - ), - self.copy_backup: self._wrap_method( - self.copy_backup, - default_timeout=None, - client_info=client_info, - ), - self.get_iam_policy: self._wrap_method( - self.get_iam_policy, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.set_iam_policy: self._wrap_method( - self.set_iam_policy, - default_timeout=60.0, - client_info=client_info, - ), - self.test_iam_permissions: self._wrap_method( - self.test_iam_permissions, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.create_schema_bundle: self._wrap_method( - self.create_schema_bundle, - default_timeout=None, - client_info=client_info, - ), - self.update_schema_bundle: self._wrap_method( - self.update_schema_bundle, - default_timeout=None, - client_info=client_info, - ), - self.get_schema_bundle: self._wrap_method( - self.get_schema_bundle, - default_timeout=None, - client_info=client_info, - ), - self.list_schema_bundles: self._wrap_method( - self.list_schema_bundles, - default_timeout=None, - client_info=client_info, - ), - self.delete_schema_bundle: self._wrap_method( - 
self.delete_schema_bundle, - default_timeout=None, - client_info=client_info, - ), - } - - def _wrap_method(self, func, *args, **kwargs): - if self._wrap_with_kind: # pragma: NO COVER - kwargs["kind"] = self.kind - return gapic_v1.method_async.wrap_method(func, *args, **kwargs) - - def close(self): - return self._logged_channel.close() - - @property - def kind(self) -> str: - return "grpc_asyncio" - - -__all__ = ("BigtableTableAdminGrpcAsyncIOTransport",) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py deleted file mode 100644 index ec2462d4a..000000000 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py +++ /dev/null @@ -1,7638 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import logging -import json # type: ignore - -from google.auth.transport.requests import AuthorizedSession # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.api_core import exceptions as core_exceptions -from google.api_core import retry as retries -from google.api_core import rest_helpers -from google.api_core import rest_streaming -from google.api_core import gapic_v1 -import google.protobuf - -from google.protobuf import json_format -from google.api_core import operations_v1 - -from requests import __version__ as requests_version -import dataclasses -from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union -import warnings - - -from google.cloud.bigtable_admin_v2.types import bigtable_table_admin -from google.cloud.bigtable_admin_v2.types import table -from google.cloud.bigtable_admin_v2.types import table as gba_table -from google.iam.v1 import iam_policy_pb2 # type: ignore -from google.iam.v1 import policy_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.longrunning import operations_pb2 # type: ignore - - -from .rest_base import _BaseBigtableTableAdminRestTransport -from .base import DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object, None] # type: ignore - -try: - from google.api_core import client_logging # type: ignore - - CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER -except ImportError: # pragma: NO COVER - CLIENT_LOGGING_SUPPORTED = False - -_LOGGER = logging.getLogger(__name__) - -DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, - grpc_version=None, - rest_version=f"requests@{requests_version}", -) - -if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER - DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ - - -class BigtableTableAdminRestInterceptor: - """Interceptor for BigtableTableAdmin. 
- - Interceptors are used to manipulate requests, request metadata, and responses - in arbitrary ways. - Example use cases include: - * Logging - * Verifying requests according to service or custom semantics - * Stripping extraneous information from responses - - These use cases and more can be enabled by injecting an - instance of a custom subclass when constructing the BigtableTableAdminRestTransport. - - .. code-block:: python - class MyCustomBigtableTableAdminInterceptor(BigtableTableAdminRestInterceptor): - def pre_check_consistency(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_check_consistency(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_copy_backup(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_copy_backup(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_create_authorized_view(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_create_authorized_view(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_create_backup(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_create_backup(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_create_schema_bundle(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_create_schema_bundle(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_create_table(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_create_table(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_create_table_from_snapshot(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_create_table_from_snapshot(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_delete_authorized_view(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def pre_delete_backup(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def pre_delete_schema_bundle(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def pre_delete_snapshot(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def pre_delete_table(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def pre_drop_row_range(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def pre_generate_consistency_token(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_generate_consistency_token(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_get_authorized_view(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_get_authorized_view(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_get_backup(self, request, metadata): - 
logging.log(f"Received request: {request}") - return request, metadata - - def post_get_backup(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_get_iam_policy(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_get_iam_policy(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_get_schema_bundle(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_get_schema_bundle(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_get_snapshot(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_get_snapshot(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_get_table(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_get_table(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_list_authorized_views(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_list_authorized_views(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_list_backups(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_list_backups(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_list_schema_bundles(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_list_schema_bundles(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_list_snapshots(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_list_snapshots(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_list_tables(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_list_tables(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_modify_column_families(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_modify_column_families(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_restore_table(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_restore_table(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_set_iam_policy(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_set_iam_policy(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_snapshot_table(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_snapshot_table(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_test_iam_permissions(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_test_iam_permissions(self, response): - logging.log(f"Received response: {response}") - return response - - def 
pre_undelete_table(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_undelete_table(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_update_authorized_view(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_update_authorized_view(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_update_backup(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_update_backup(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_update_schema_bundle(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_update_schema_bundle(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_update_table(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_update_table(self, response): - logging.log(f"Received response: {response}") - return response - - transport = BigtableTableAdminRestTransport(interceptor=MyCustomBigtableTableAdminInterceptor()) - client = BaseBigtableTableAdminClient(transport=transport) - - - """ - - def pre_check_consistency( - self, - request: bigtable_table_admin.CheckConsistencyRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_table_admin.CheckConsistencyRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for check_consistency - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def post_check_consistency( - self, response: bigtable_table_admin.CheckConsistencyResponse - ) -> bigtable_table_admin.CheckConsistencyResponse: - """Post-rpc interceptor for check_consistency - - DEPRECATED. Please use the `post_check_consistency_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. This `post_check_consistency` interceptor runs - before the `post_check_consistency_with_metadata` interceptor. - """ - return response - - def post_check_consistency_with_metadata( - self, - response: bigtable_table_admin.CheckConsistencyResponse, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_table_admin.CheckConsistencyResponse, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Post-rpc interceptor for check_consistency - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableTableAdmin server but before it is returned to user code. - - We recommend only using this `post_check_consistency_with_metadata` - interceptor in new development instead of the `post_check_consistency` interceptor. - When both interceptors are used, this `post_check_consistency_with_metadata` interceptor runs after the - `post_check_consistency` interceptor. The (possibly modified) response returned by - `post_check_consistency` will be passed to - `post_check_consistency_with_metadata`. 
- """ - return response, metadata - - def pre_copy_backup( - self, - request: bigtable_table_admin.CopyBackupRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_table_admin.CopyBackupRequest, Sequence[Tuple[str, Union[str, bytes]]] - ]: - """Pre-rpc interceptor for copy_backup - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def post_copy_backup( - self, response: operations_pb2.Operation - ) -> operations_pb2.Operation: - """Post-rpc interceptor for copy_backup - - DEPRECATED. Please use the `post_copy_backup_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. This `post_copy_backup` interceptor runs - before the `post_copy_backup_with_metadata` interceptor. - """ - return response - - def post_copy_backup_with_metadata( - self, - response: operations_pb2.Operation, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for copy_backup - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableTableAdmin server but before it is returned to user code. - - We recommend only using this `post_copy_backup_with_metadata` - interceptor in new development instead of the `post_copy_backup` interceptor. - When both interceptors are used, this `post_copy_backup_with_metadata` interceptor runs after the - `post_copy_backup` interceptor. The (possibly modified) response returned by - `post_copy_backup` will be passed to - `post_copy_backup_with_metadata`. - """ - return response, metadata - - def pre_create_authorized_view( - self, - request: bigtable_table_admin.CreateAuthorizedViewRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_table_admin.CreateAuthorizedViewRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for create_authorized_view - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def post_create_authorized_view( - self, response: operations_pb2.Operation - ) -> operations_pb2.Operation: - """Post-rpc interceptor for create_authorized_view - - DEPRECATED. Please use the `post_create_authorized_view_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. This `post_create_authorized_view` interceptor runs - before the `post_create_authorized_view_with_metadata` interceptor. - """ - return response - - def post_create_authorized_view_with_metadata( - self, - response: operations_pb2.Operation, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for create_authorized_view - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableTableAdmin server but before it is returned to user code. - - We recommend only using this `post_create_authorized_view_with_metadata` - interceptor in new development instead of the `post_create_authorized_view` interceptor. 
- When both interceptors are used, this `post_create_authorized_view_with_metadata` interceptor runs after the - `post_create_authorized_view` interceptor. The (possibly modified) response returned by - `post_create_authorized_view` will be passed to - `post_create_authorized_view_with_metadata`. - """ - return response, metadata - - def pre_create_backup( - self, - request: bigtable_table_admin.CreateBackupRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_table_admin.CreateBackupRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for create_backup - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def post_create_backup( - self, response: operations_pb2.Operation - ) -> operations_pb2.Operation: - """Post-rpc interceptor for create_backup - - DEPRECATED. Please use the `post_create_backup_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. This `post_create_backup` interceptor runs - before the `post_create_backup_with_metadata` interceptor. - """ - return response - - def post_create_backup_with_metadata( - self, - response: operations_pb2.Operation, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for create_backup - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableTableAdmin server but before it is returned to user code. - - We recommend only using this `post_create_backup_with_metadata` - interceptor in new development instead of the `post_create_backup` interceptor. - When both interceptors are used, this `post_create_backup_with_metadata` interceptor runs after the - `post_create_backup` interceptor. The (possibly modified) response returned by - `post_create_backup` will be passed to - `post_create_backup_with_metadata`. - """ - return response, metadata - - def pre_create_schema_bundle( - self, - request: bigtable_table_admin.CreateSchemaBundleRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_table_admin.CreateSchemaBundleRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for create_schema_bundle - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def post_create_schema_bundle( - self, response: operations_pb2.Operation - ) -> operations_pb2.Operation: - """Post-rpc interceptor for create_schema_bundle - - DEPRECATED. Please use the `post_create_schema_bundle_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. This `post_create_schema_bundle` interceptor runs - before the `post_create_schema_bundle_with_metadata` interceptor. 
- """ - return response - - def post_create_schema_bundle_with_metadata( - self, - response: operations_pb2.Operation, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for create_schema_bundle - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableTableAdmin server but before it is returned to user code. - - We recommend only using this `post_create_schema_bundle_with_metadata` - interceptor in new development instead of the `post_create_schema_bundle` interceptor. - When both interceptors are used, this `post_create_schema_bundle_with_metadata` interceptor runs after the - `post_create_schema_bundle` interceptor. The (possibly modified) response returned by - `post_create_schema_bundle` will be passed to - `post_create_schema_bundle_with_metadata`. - """ - return response, metadata - - def pre_create_table( - self, - request: bigtable_table_admin.CreateTableRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_table_admin.CreateTableRequest, Sequence[Tuple[str, Union[str, bytes]]] - ]: - """Pre-rpc interceptor for create_table - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def post_create_table(self, response: gba_table.Table) -> gba_table.Table: - """Post-rpc interceptor for create_table - - DEPRECATED. Please use the `post_create_table_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. This `post_create_table` interceptor runs - before the `post_create_table_with_metadata` interceptor. - """ - return response - - def post_create_table_with_metadata( - self, - response: gba_table.Table, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[gba_table.Table, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for create_table - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableTableAdmin server but before it is returned to user code. - - We recommend only using this `post_create_table_with_metadata` - interceptor in new development instead of the `post_create_table` interceptor. - When both interceptors are used, this `post_create_table_with_metadata` interceptor runs after the - `post_create_table` interceptor. The (possibly modified) response returned by - `post_create_table` will be passed to - `post_create_table_with_metadata`. - """ - return response, metadata - - def pre_create_table_from_snapshot( - self, - request: bigtable_table_admin.CreateTableFromSnapshotRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_table_admin.CreateTableFromSnapshotRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for create_table_from_snapshot - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def post_create_table_from_snapshot( - self, response: operations_pb2.Operation - ) -> operations_pb2.Operation: - """Post-rpc interceptor for create_table_from_snapshot - - DEPRECATED. Please use the `post_create_table_from_snapshot_with_metadata` - interceptor instead. 
- - Override in a subclass to read or manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. This `post_create_table_from_snapshot` interceptor runs - before the `post_create_table_from_snapshot_with_metadata` interceptor. - """ - return response - - def post_create_table_from_snapshot_with_metadata( - self, - response: operations_pb2.Operation, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for create_table_from_snapshot - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableTableAdmin server but before it is returned to user code. - - We recommend only using this `post_create_table_from_snapshot_with_metadata` - interceptor in new development instead of the `post_create_table_from_snapshot` interceptor. - When both interceptors are used, this `post_create_table_from_snapshot_with_metadata` interceptor runs after the - `post_create_table_from_snapshot` interceptor. The (possibly modified) response returned by - `post_create_table_from_snapshot` will be passed to - `post_create_table_from_snapshot_with_metadata`. - """ - return response, metadata - - def pre_delete_authorized_view( - self, - request: bigtable_table_admin.DeleteAuthorizedViewRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_table_admin.DeleteAuthorizedViewRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for delete_authorized_view - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def pre_delete_backup( - self, - request: bigtable_table_admin.DeleteBackupRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_table_admin.DeleteBackupRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for delete_backup - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def pre_delete_schema_bundle( - self, - request: bigtable_table_admin.DeleteSchemaBundleRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_table_admin.DeleteSchemaBundleRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for delete_schema_bundle - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def pre_delete_snapshot( - self, - request: bigtable_table_admin.DeleteSnapshotRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_table_admin.DeleteSnapshotRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for delete_snapshot - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def pre_delete_table( - self, - request: bigtable_table_admin.DeleteTableRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_table_admin.DeleteTableRequest, Sequence[Tuple[str, Union[str, bytes]]] - ]: - """Pre-rpc interceptor for delete_table - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. 
- """ - return request, metadata - - def pre_drop_row_range( - self, - request: bigtable_table_admin.DropRowRangeRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_table_admin.DropRowRangeRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for drop_row_range - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def pre_generate_consistency_token( - self, - request: bigtable_table_admin.GenerateConsistencyTokenRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_table_admin.GenerateConsistencyTokenRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for generate_consistency_token - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def post_generate_consistency_token( - self, response: bigtable_table_admin.GenerateConsistencyTokenResponse - ) -> bigtable_table_admin.GenerateConsistencyTokenResponse: - """Post-rpc interceptor for generate_consistency_token - - DEPRECATED. Please use the `post_generate_consistency_token_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. This `post_generate_consistency_token` interceptor runs - before the `post_generate_consistency_token_with_metadata` interceptor. - """ - return response - - def post_generate_consistency_token_with_metadata( - self, - response: bigtable_table_admin.GenerateConsistencyTokenResponse, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_table_admin.GenerateConsistencyTokenResponse, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Post-rpc interceptor for generate_consistency_token - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableTableAdmin server but before it is returned to user code. - - We recommend only using this `post_generate_consistency_token_with_metadata` - interceptor in new development instead of the `post_generate_consistency_token` interceptor. - When both interceptors are used, this `post_generate_consistency_token_with_metadata` interceptor runs after the - `post_generate_consistency_token` interceptor. The (possibly modified) response returned by - `post_generate_consistency_token` will be passed to - `post_generate_consistency_token_with_metadata`. - """ - return response, metadata - - def pre_get_authorized_view( - self, - request: bigtable_table_admin.GetAuthorizedViewRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_table_admin.GetAuthorizedViewRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for get_authorized_view - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def post_get_authorized_view( - self, response: table.AuthorizedView - ) -> table.AuthorizedView: - """Post-rpc interceptor for get_authorized_view - - DEPRECATED. Please use the `post_get_authorized_view_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. 
This `post_get_authorized_view` interceptor runs - before the `post_get_authorized_view_with_metadata` interceptor. - """ - return response - - def post_get_authorized_view_with_metadata( - self, - response: table.AuthorizedView, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[table.AuthorizedView, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for get_authorized_view - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableTableAdmin server but before it is returned to user code. - - We recommend only using this `post_get_authorized_view_with_metadata` - interceptor in new development instead of the `post_get_authorized_view` interceptor. - When both interceptors are used, this `post_get_authorized_view_with_metadata` interceptor runs after the - `post_get_authorized_view` interceptor. The (possibly modified) response returned by - `post_get_authorized_view` will be passed to - `post_get_authorized_view_with_metadata`. - """ - return response, metadata - - def pre_get_backup( - self, - request: bigtable_table_admin.GetBackupRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_table_admin.GetBackupRequest, Sequence[Tuple[str, Union[str, bytes]]] - ]: - """Pre-rpc interceptor for get_backup - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def post_get_backup(self, response: table.Backup) -> table.Backup: - """Post-rpc interceptor for get_backup - - DEPRECATED. Please use the `post_get_backup_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. This `post_get_backup` interceptor runs - before the `post_get_backup_with_metadata` interceptor. - """ - return response - - def post_get_backup_with_metadata( - self, response: table.Backup, metadata: Sequence[Tuple[str, Union[str, bytes]]] - ) -> Tuple[table.Backup, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for get_backup - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableTableAdmin server but before it is returned to user code. - - We recommend only using this `post_get_backup_with_metadata` - interceptor in new development instead of the `post_get_backup` interceptor. - When both interceptors are used, this `post_get_backup_with_metadata` interceptor runs after the - `post_get_backup` interceptor. The (possibly modified) response returned by - `post_get_backup` will be passed to - `post_get_backup_with_metadata`. - """ - return response, metadata - - def pre_get_iam_policy( - self, - request: iam_policy_pb2.GetIamPolicyRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - iam_policy_pb2.GetIamPolicyRequest, Sequence[Tuple[str, Union[str, bytes]]] - ]: - """Pre-rpc interceptor for get_iam_policy - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def post_get_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: - """Post-rpc interceptor for get_iam_policy - - DEPRECATED. Please use the `post_get_iam_policy_with_metadata` - interceptor instead. 
- - Override in a subclass to read or manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. This `post_get_iam_policy` interceptor runs - before the `post_get_iam_policy_with_metadata` interceptor. - """ - return response - - def post_get_iam_policy_with_metadata( - self, - response: policy_pb2.Policy, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[policy_pb2.Policy, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for get_iam_policy - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableTableAdmin server but before it is returned to user code. - - We recommend only using this `post_get_iam_policy_with_metadata` - interceptor in new development instead of the `post_get_iam_policy` interceptor. - When both interceptors are used, this `post_get_iam_policy_with_metadata` interceptor runs after the - `post_get_iam_policy` interceptor. The (possibly modified) response returned by - `post_get_iam_policy` will be passed to - `post_get_iam_policy_with_metadata`. - """ - return response, metadata - - def pre_get_schema_bundle( - self, - request: bigtable_table_admin.GetSchemaBundleRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_table_admin.GetSchemaBundleRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for get_schema_bundle - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def post_get_schema_bundle( - self, response: table.SchemaBundle - ) -> table.SchemaBundle: - """Post-rpc interceptor for get_schema_bundle - - DEPRECATED. Please use the `post_get_schema_bundle_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. This `post_get_schema_bundle` interceptor runs - before the `post_get_schema_bundle_with_metadata` interceptor. - """ - return response - - def post_get_schema_bundle_with_metadata( - self, - response: table.SchemaBundle, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[table.SchemaBundle, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for get_schema_bundle - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableTableAdmin server but before it is returned to user code. - - We recommend only using this `post_get_schema_bundle_with_metadata` - interceptor in new development instead of the `post_get_schema_bundle` interceptor. - When both interceptors are used, this `post_get_schema_bundle_with_metadata` interceptor runs after the - `post_get_schema_bundle` interceptor. The (possibly modified) response returned by - `post_get_schema_bundle` will be passed to - `post_get_schema_bundle_with_metadata`. - """ - return response, metadata - - def pre_get_snapshot( - self, - request: bigtable_table_admin.GetSnapshotRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_table_admin.GetSnapshotRequest, Sequence[Tuple[str, Union[str, bytes]]] - ]: - """Pre-rpc interceptor for get_snapshot - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. 
- """ - return request, metadata - - def post_get_snapshot(self, response: table.Snapshot) -> table.Snapshot: - """Post-rpc interceptor for get_snapshot - - DEPRECATED. Please use the `post_get_snapshot_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. This `post_get_snapshot` interceptor runs - before the `post_get_snapshot_with_metadata` interceptor. - """ - return response - - def post_get_snapshot_with_metadata( - self, - response: table.Snapshot, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[table.Snapshot, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for get_snapshot - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableTableAdmin server but before it is returned to user code. - - We recommend only using this `post_get_snapshot_with_metadata` - interceptor in new development instead of the `post_get_snapshot` interceptor. - When both interceptors are used, this `post_get_snapshot_with_metadata` interceptor runs after the - `post_get_snapshot` interceptor. The (possibly modified) response returned by - `post_get_snapshot` will be passed to - `post_get_snapshot_with_metadata`. - """ - return response, metadata - - def pre_get_table( - self, - request: bigtable_table_admin.GetTableRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_table_admin.GetTableRequest, Sequence[Tuple[str, Union[str, bytes]]] - ]: - """Pre-rpc interceptor for get_table - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def post_get_table(self, response: table.Table) -> table.Table: - """Post-rpc interceptor for get_table - - DEPRECATED. Please use the `post_get_table_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. This `post_get_table` interceptor runs - before the `post_get_table_with_metadata` interceptor. - """ - return response - - def post_get_table_with_metadata( - self, response: table.Table, metadata: Sequence[Tuple[str, Union[str, bytes]]] - ) -> Tuple[table.Table, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for get_table - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableTableAdmin server but before it is returned to user code. - - We recommend only using this `post_get_table_with_metadata` - interceptor in new development instead of the `post_get_table` interceptor. - When both interceptors are used, this `post_get_table_with_metadata` interceptor runs after the - `post_get_table` interceptor. The (possibly modified) response returned by - `post_get_table` will be passed to - `post_get_table_with_metadata`. - """ - return response, metadata - - def pre_list_authorized_views( - self, - request: bigtable_table_admin.ListAuthorizedViewsRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_table_admin.ListAuthorizedViewsRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for list_authorized_views - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. 
- """ - return request, metadata - - def post_list_authorized_views( - self, response: bigtable_table_admin.ListAuthorizedViewsResponse - ) -> bigtable_table_admin.ListAuthorizedViewsResponse: - """Post-rpc interceptor for list_authorized_views - - DEPRECATED. Please use the `post_list_authorized_views_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. This `post_list_authorized_views` interceptor runs - before the `post_list_authorized_views_with_metadata` interceptor. - """ - return response - - def post_list_authorized_views_with_metadata( - self, - response: bigtable_table_admin.ListAuthorizedViewsResponse, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_table_admin.ListAuthorizedViewsResponse, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Post-rpc interceptor for list_authorized_views - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableTableAdmin server but before it is returned to user code. - - We recommend only using this `post_list_authorized_views_with_metadata` - interceptor in new development instead of the `post_list_authorized_views` interceptor. - When both interceptors are used, this `post_list_authorized_views_with_metadata` interceptor runs after the - `post_list_authorized_views` interceptor. The (possibly modified) response returned by - `post_list_authorized_views` will be passed to - `post_list_authorized_views_with_metadata`. - """ - return response, metadata - - def pre_list_backups( - self, - request: bigtable_table_admin.ListBackupsRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_table_admin.ListBackupsRequest, Sequence[Tuple[str, Union[str, bytes]]] - ]: - """Pre-rpc interceptor for list_backups - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def post_list_backups( - self, response: bigtable_table_admin.ListBackupsResponse - ) -> bigtable_table_admin.ListBackupsResponse: - """Post-rpc interceptor for list_backups - - DEPRECATED. Please use the `post_list_backups_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. This `post_list_backups` interceptor runs - before the `post_list_backups_with_metadata` interceptor. - """ - return response - - def post_list_backups_with_metadata( - self, - response: bigtable_table_admin.ListBackupsResponse, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_table_admin.ListBackupsResponse, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Post-rpc interceptor for list_backups - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableTableAdmin server but before it is returned to user code. - - We recommend only using this `post_list_backups_with_metadata` - interceptor in new development instead of the `post_list_backups` interceptor. - When both interceptors are used, this `post_list_backups_with_metadata` interceptor runs after the - `post_list_backups` interceptor. The (possibly modified) response returned by - `post_list_backups` will be passed to - `post_list_backups_with_metadata`. 
- """ - return response, metadata - - def pre_list_schema_bundles( - self, - request: bigtable_table_admin.ListSchemaBundlesRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_table_admin.ListSchemaBundlesRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for list_schema_bundles - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def post_list_schema_bundles( - self, response: bigtable_table_admin.ListSchemaBundlesResponse - ) -> bigtable_table_admin.ListSchemaBundlesResponse: - """Post-rpc interceptor for list_schema_bundles - - DEPRECATED. Please use the `post_list_schema_bundles_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. This `post_list_schema_bundles` interceptor runs - before the `post_list_schema_bundles_with_metadata` interceptor. - """ - return response - - def post_list_schema_bundles_with_metadata( - self, - response: bigtable_table_admin.ListSchemaBundlesResponse, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_table_admin.ListSchemaBundlesResponse, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Post-rpc interceptor for list_schema_bundles - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableTableAdmin server but before it is returned to user code. - - We recommend only using this `post_list_schema_bundles_with_metadata` - interceptor in new development instead of the `post_list_schema_bundles` interceptor. - When both interceptors are used, this `post_list_schema_bundles_with_metadata` interceptor runs after the - `post_list_schema_bundles` interceptor. The (possibly modified) response returned by - `post_list_schema_bundles` will be passed to - `post_list_schema_bundles_with_metadata`. - """ - return response, metadata - - def pre_list_snapshots( - self, - request: bigtable_table_admin.ListSnapshotsRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_table_admin.ListSnapshotsRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for list_snapshots - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def post_list_snapshots( - self, response: bigtable_table_admin.ListSnapshotsResponse - ) -> bigtable_table_admin.ListSnapshotsResponse: - """Post-rpc interceptor for list_snapshots - - DEPRECATED. Please use the `post_list_snapshots_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. This `post_list_snapshots` interceptor runs - before the `post_list_snapshots_with_metadata` interceptor. 
- """ - return response - - def post_list_snapshots_with_metadata( - self, - response: bigtable_table_admin.ListSnapshotsResponse, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_table_admin.ListSnapshotsResponse, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Post-rpc interceptor for list_snapshots - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableTableAdmin server but before it is returned to user code. - - We recommend only using this `post_list_snapshots_with_metadata` - interceptor in new development instead of the `post_list_snapshots` interceptor. - When both interceptors are used, this `post_list_snapshots_with_metadata` interceptor runs after the - `post_list_snapshots` interceptor. The (possibly modified) response returned by - `post_list_snapshots` will be passed to - `post_list_snapshots_with_metadata`. - """ - return response, metadata - - def pre_list_tables( - self, - request: bigtable_table_admin.ListTablesRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_table_admin.ListTablesRequest, Sequence[Tuple[str, Union[str, bytes]]] - ]: - """Pre-rpc interceptor for list_tables - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def post_list_tables( - self, response: bigtable_table_admin.ListTablesResponse - ) -> bigtable_table_admin.ListTablesResponse: - """Post-rpc interceptor for list_tables - - DEPRECATED. Please use the `post_list_tables_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. This `post_list_tables` interceptor runs - before the `post_list_tables_with_metadata` interceptor. - """ - return response - - def post_list_tables_with_metadata( - self, - response: bigtable_table_admin.ListTablesResponse, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_table_admin.ListTablesResponse, Sequence[Tuple[str, Union[str, bytes]]] - ]: - """Post-rpc interceptor for list_tables - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableTableAdmin server but before it is returned to user code. - - We recommend only using this `post_list_tables_with_metadata` - interceptor in new development instead of the `post_list_tables` interceptor. - When both interceptors are used, this `post_list_tables_with_metadata` interceptor runs after the - `post_list_tables` interceptor. The (possibly modified) response returned by - `post_list_tables` will be passed to - `post_list_tables_with_metadata`. - """ - return response, metadata - - def pre_modify_column_families( - self, - request: bigtable_table_admin.ModifyColumnFamiliesRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_table_admin.ModifyColumnFamiliesRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for modify_column_families - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def post_modify_column_families(self, response: table.Table) -> table.Table: - """Post-rpc interceptor for modify_column_families - - DEPRECATED. Please use the `post_modify_column_families_with_metadata` - interceptor instead. 
- - Override in a subclass to read or manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. This `post_modify_column_families` interceptor runs - before the `post_modify_column_families_with_metadata` interceptor. - """ - return response - - def post_modify_column_families_with_metadata( - self, response: table.Table, metadata: Sequence[Tuple[str, Union[str, bytes]]] - ) -> Tuple[table.Table, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for modify_column_families - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableTableAdmin server but before it is returned to user code. - - We recommend only using this `post_modify_column_families_with_metadata` - interceptor in new development instead of the `post_modify_column_families` interceptor. - When both interceptors are used, this `post_modify_column_families_with_metadata` interceptor runs after the - `post_modify_column_families` interceptor. The (possibly modified) response returned by - `post_modify_column_families` will be passed to - `post_modify_column_families_with_metadata`. - """ - return response, metadata - - def pre_restore_table( - self, - request: bigtable_table_admin.RestoreTableRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_table_admin.RestoreTableRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for restore_table - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def post_restore_table( - self, response: operations_pb2.Operation - ) -> operations_pb2.Operation: - """Post-rpc interceptor for restore_table - - DEPRECATED. Please use the `post_restore_table_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. This `post_restore_table` interceptor runs - before the `post_restore_table_with_metadata` interceptor. - """ - return response - - def post_restore_table_with_metadata( - self, - response: operations_pb2.Operation, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for restore_table - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableTableAdmin server but before it is returned to user code. - - We recommend only using this `post_restore_table_with_metadata` - interceptor in new development instead of the `post_restore_table` interceptor. - When both interceptors are used, this `post_restore_table_with_metadata` interceptor runs after the - `post_restore_table` interceptor. The (possibly modified) response returned by - `post_restore_table` will be passed to - `post_restore_table_with_metadata`. - """ - return response, metadata - - def pre_set_iam_policy( - self, - request: iam_policy_pb2.SetIamPolicyRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - iam_policy_pb2.SetIamPolicyRequest, Sequence[Tuple[str, Union[str, bytes]]] - ]: - """Pre-rpc interceptor for set_iam_policy - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. 
- """ - return request, metadata - - def post_set_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: - """Post-rpc interceptor for set_iam_policy - - DEPRECATED. Please use the `post_set_iam_policy_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. This `post_set_iam_policy` interceptor runs - before the `post_set_iam_policy_with_metadata` interceptor. - """ - return response - - def post_set_iam_policy_with_metadata( - self, - response: policy_pb2.Policy, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[policy_pb2.Policy, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for set_iam_policy - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableTableAdmin server but before it is returned to user code. - - We recommend only using this `post_set_iam_policy_with_metadata` - interceptor in new development instead of the `post_set_iam_policy` interceptor. - When both interceptors are used, this `post_set_iam_policy_with_metadata` interceptor runs after the - `post_set_iam_policy` interceptor. The (possibly modified) response returned by - `post_set_iam_policy` will be passed to - `post_set_iam_policy_with_metadata`. - """ - return response, metadata - - def pre_snapshot_table( - self, - request: bigtable_table_admin.SnapshotTableRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_table_admin.SnapshotTableRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for snapshot_table - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def post_snapshot_table( - self, response: operations_pb2.Operation - ) -> operations_pb2.Operation: - """Post-rpc interceptor for snapshot_table - - DEPRECATED. Please use the `post_snapshot_table_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. This `post_snapshot_table` interceptor runs - before the `post_snapshot_table_with_metadata` interceptor. - """ - return response - - def post_snapshot_table_with_metadata( - self, - response: operations_pb2.Operation, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for snapshot_table - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableTableAdmin server but before it is returned to user code. - - We recommend only using this `post_snapshot_table_with_metadata` - interceptor in new development instead of the `post_snapshot_table` interceptor. - When both interceptors are used, this `post_snapshot_table_with_metadata` interceptor runs after the - `post_snapshot_table` interceptor. The (possibly modified) response returned by - `post_snapshot_table` will be passed to - `post_snapshot_table_with_metadata`. 
- """ - return response, metadata - - def pre_test_iam_permissions( - self, - request: iam_policy_pb2.TestIamPermissionsRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - iam_policy_pb2.TestIamPermissionsRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for test_iam_permissions - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def post_test_iam_permissions( - self, response: iam_policy_pb2.TestIamPermissionsResponse - ) -> iam_policy_pb2.TestIamPermissionsResponse: - """Post-rpc interceptor for test_iam_permissions - - DEPRECATED. Please use the `post_test_iam_permissions_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. This `post_test_iam_permissions` interceptor runs - before the `post_test_iam_permissions_with_metadata` interceptor. - """ - return response - - def post_test_iam_permissions_with_metadata( - self, - response: iam_policy_pb2.TestIamPermissionsResponse, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - iam_policy_pb2.TestIamPermissionsResponse, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Post-rpc interceptor for test_iam_permissions - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableTableAdmin server but before it is returned to user code. - - We recommend only using this `post_test_iam_permissions_with_metadata` - interceptor in new development instead of the `post_test_iam_permissions` interceptor. - When both interceptors are used, this `post_test_iam_permissions_with_metadata` interceptor runs after the - `post_test_iam_permissions` interceptor. The (possibly modified) response returned by - `post_test_iam_permissions` will be passed to - `post_test_iam_permissions_with_metadata`. - """ - return response, metadata - - def pre_undelete_table( - self, - request: bigtable_table_admin.UndeleteTableRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_table_admin.UndeleteTableRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for undelete_table - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def post_undelete_table( - self, response: operations_pb2.Operation - ) -> operations_pb2.Operation: - """Post-rpc interceptor for undelete_table - - DEPRECATED. Please use the `post_undelete_table_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. This `post_undelete_table` interceptor runs - before the `post_undelete_table_with_metadata` interceptor. - """ - return response - - def post_undelete_table_with_metadata( - self, - response: operations_pb2.Operation, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for undelete_table - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableTableAdmin server but before it is returned to user code. 
- - We recommend only using this `post_undelete_table_with_metadata` - interceptor in new development instead of the `post_undelete_table` interceptor. - When both interceptors are used, this `post_undelete_table_with_metadata` interceptor runs after the - `post_undelete_table` interceptor. The (possibly modified) response returned by - `post_undelete_table` will be passed to - `post_undelete_table_with_metadata`. - """ - return response, metadata - - def pre_update_authorized_view( - self, - request: bigtable_table_admin.UpdateAuthorizedViewRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_table_admin.UpdateAuthorizedViewRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for update_authorized_view - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def post_update_authorized_view( - self, response: operations_pb2.Operation - ) -> operations_pb2.Operation: - """Post-rpc interceptor for update_authorized_view - - DEPRECATED. Please use the `post_update_authorized_view_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. This `post_update_authorized_view` interceptor runs - before the `post_update_authorized_view_with_metadata` interceptor. - """ - return response - - def post_update_authorized_view_with_metadata( - self, - response: operations_pb2.Operation, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for update_authorized_view - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableTableAdmin server but before it is returned to user code. - - We recommend only using this `post_update_authorized_view_with_metadata` - interceptor in new development instead of the `post_update_authorized_view` interceptor. - When both interceptors are used, this `post_update_authorized_view_with_metadata` interceptor runs after the - `post_update_authorized_view` interceptor. The (possibly modified) response returned by - `post_update_authorized_view` will be passed to - `post_update_authorized_view_with_metadata`. - """ - return response, metadata - - def pre_update_backup( - self, - request: bigtable_table_admin.UpdateBackupRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_table_admin.UpdateBackupRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for update_backup - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def post_update_backup(self, response: table.Backup) -> table.Backup: - """Post-rpc interceptor for update_backup - - DEPRECATED. Please use the `post_update_backup_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. This `post_update_backup` interceptor runs - before the `post_update_backup_with_metadata` interceptor. 
- """ - return response - - def post_update_backup_with_metadata( - self, response: table.Backup, metadata: Sequence[Tuple[str, Union[str, bytes]]] - ) -> Tuple[table.Backup, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for update_backup - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableTableAdmin server but before it is returned to user code. - - We recommend only using this `post_update_backup_with_metadata` - interceptor in new development instead of the `post_update_backup` interceptor. - When both interceptors are used, this `post_update_backup_with_metadata` interceptor runs after the - `post_update_backup` interceptor. The (possibly modified) response returned by - `post_update_backup` will be passed to - `post_update_backup_with_metadata`. - """ - return response, metadata - - def pre_update_schema_bundle( - self, - request: bigtable_table_admin.UpdateSchemaBundleRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_table_admin.UpdateSchemaBundleRequest, - Sequence[Tuple[str, Union[str, bytes]]], - ]: - """Pre-rpc interceptor for update_schema_bundle - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def post_update_schema_bundle( - self, response: operations_pb2.Operation - ) -> operations_pb2.Operation: - """Post-rpc interceptor for update_schema_bundle - - DEPRECATED. Please use the `post_update_schema_bundle_with_metadata` - interceptor instead. - - Override in a subclass to read or manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. This `post_update_schema_bundle` interceptor runs - before the `post_update_schema_bundle_with_metadata` interceptor. - """ - return response - - def post_update_schema_bundle_with_metadata( - self, - response: operations_pb2.Operation, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for update_schema_bundle - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableTableAdmin server but before it is returned to user code. - - We recommend only using this `post_update_schema_bundle_with_metadata` - interceptor in new development instead of the `post_update_schema_bundle` interceptor. - When both interceptors are used, this `post_update_schema_bundle_with_metadata` interceptor runs after the - `post_update_schema_bundle` interceptor. The (possibly modified) response returned by - `post_update_schema_bundle` will be passed to - `post_update_schema_bundle_with_metadata`. - """ - return response, metadata - - def pre_update_table( - self, - request: bigtable_table_admin.UpdateTableRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - bigtable_table_admin.UpdateTableRequest, Sequence[Tuple[str, Union[str, bytes]]] - ]: - """Pre-rpc interceptor for update_table - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def post_update_table( - self, response: operations_pb2.Operation - ) -> operations_pb2.Operation: - """Post-rpc interceptor for update_table - - DEPRECATED. Please use the `post_update_table_with_metadata` - interceptor instead. 
- - Override in a subclass to read or manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. This `post_update_table` interceptor runs - before the `post_update_table_with_metadata` interceptor. - """ - return response - - def post_update_table_with_metadata( - self, - response: operations_pb2.Operation, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: - """Post-rpc interceptor for update_table - - Override in a subclass to read or manipulate the response or metadata after it - is returned by the BigtableTableAdmin server but before it is returned to user code. - - We recommend only using this `post_update_table_with_metadata` - interceptor in new development instead of the `post_update_table` interceptor. - When both interceptors are used, this `post_update_table_with_metadata` interceptor runs after the - `post_update_table` interceptor. The (possibly modified) response returned by - `post_update_table` will be passed to - `post_update_table_with_metadata`. - """ - return response, metadata - - -@dataclasses.dataclass -class BigtableTableAdminRestStub: - _session: AuthorizedSession - _host: str - _interceptor: BigtableTableAdminRestInterceptor - - -class BigtableTableAdminRestTransport(_BaseBigtableTableAdminRestTransport): - """REST backend synchronous transport for BigtableTableAdmin. - - Service for creating, configuring, and deleting Cloud - Bigtable tables. - - Provides access to the table schemas only, not the data stored - within the tables. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends JSON representations of protocol buffers over HTTP/1.1 - """ - - def __init__( - self, - *, - host: str = "bigtableadmin.googleapis.com", - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - url_scheme: str = "https", - interceptor: Optional[BigtableTableAdminRestInterceptor] = None, - api_audience: Optional[str] = None, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to (default: 'bigtableadmin.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. 
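# --- Illustrative sketch (not part of the generated diff): constructing the REST
# --- transport directly with the constructor parameters documented here, wiring in
# --- the hypothetical LoggingTableAdminInterceptor from the earlier sketch. The
# --- import path and the client class that would consume the transport are assumptions.
from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.rest import (
    BigtableTableAdminRestTransport,
)

transport = BigtableTableAdminRestTransport(
    host="bigtableadmin.googleapis.com",          # default endpoint
    interceptor=LoggingTableAdminInterceptor(),   # optional custom interceptor
    url_scheme="https",                           # "http" only for local/test servers
)
# With no explicit credentials, the transport falls back to application default
# credentials; a generated admin client can then typically be built on top of it
# through its ``transport=`` constructor argument.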
- client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. - """ - # Run the base constructor - # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. - # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the - # credentials object - super().__init__( - host=host, - credentials=credentials, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - url_scheme=url_scheme, - api_audience=api_audience, - ) - self._session = AuthorizedSession( - self._credentials, default_host=self.DEFAULT_HOST - ) - self._operations_client: Optional[operations_v1.AbstractOperationsClient] = None - if client_cert_source_for_mtls: - self._session.configure_mtls_channel(client_cert_source_for_mtls) - self._interceptor = interceptor or BigtableTableAdminRestInterceptor() - self._prep_wrapped_messages(client_info) - - @property - def operations_client(self) -> operations_v1.AbstractOperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Only create a new client if we do not already have one. - if self._operations_client is None: - http_options: Dict[str, List[Dict[str, str]]] = { - "google.longrunning.Operations.CancelOperation": [ - { - "method": "post", - "uri": "/v2/{name=operations/**}:cancel", - }, - ], - "google.longrunning.Operations.DeleteOperation": [ - { - "method": "delete", - "uri": "/v2/{name=operations/**}", - }, - ], - "google.longrunning.Operations.GetOperation": [ - { - "method": "get", - "uri": "/v2/{name=operations/**}", - }, - ], - "google.longrunning.Operations.ListOperations": [ - { - "method": "get", - "uri": "/v2/{name=operations/projects/**}/operations", - }, - ], - } - - rest_transport = operations_v1.OperationsRestTransport( - host=self._host, - # use the credentials which are saved - credentials=self._credentials, - scopes=self._scopes, - http_options=http_options, - path_prefix="v2", - ) - - self._operations_client = operations_v1.AbstractOperationsClient( - transport=rest_transport - ) - - # Return the client from cache. 
- return self._operations_client - - class _CheckConsistency( - _BaseBigtableTableAdminRestTransport._BaseCheckConsistency, - BigtableTableAdminRestStub, - ): - def __hash__(self): - return hash("BigtableTableAdminRestTransport.CheckConsistency") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - return response - - def __call__( - self, - request: bigtable_table_admin.CheckConsistencyRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> bigtable_table_admin.CheckConsistencyResponse: - r"""Call the check consistency method over HTTP. - - Args: - request (~.bigtable_table_admin.CheckConsistencyRequest): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.bigtable_table_admin.CheckConsistencyResponse: - Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] - - """ - - http_options = ( - _BaseBigtableTableAdminRestTransport._BaseCheckConsistency._get_http_options() - ) - - request, metadata = self._interceptor.pre_check_consistency( - request, metadata - ) - transcoded_request = _BaseBigtableTableAdminRestTransport._BaseCheckConsistency._get_transcoded_request( - http_options, request - ) - - body = _BaseBigtableTableAdminRestTransport._BaseCheckConsistency._get_request_body_json( - transcoded_request - ) - - # Jsonify the query params - query_params = _BaseBigtableTableAdminRestTransport._BaseCheckConsistency._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = type(request).to_json(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.CheckConsistency", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "CheckConsistency", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = BigtableTableAdminRestTransport._CheckConsistency._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - body, - ) - - 
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = bigtable_table_admin.CheckConsistencyResponse() - pb_resp = bigtable_table_admin.CheckConsistencyResponse.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_check_consistency(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_check_consistency_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = ( - bigtable_table_admin.CheckConsistencyResponse.to_json(response) - ) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.check_consistency", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "CheckConsistency", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _CopyBackup( - _BaseBigtableTableAdminRestTransport._BaseCopyBackup, BigtableTableAdminRestStub - ): - def __hash__(self): - return hash("BigtableTableAdminRestTransport.CopyBackup") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - return response - - def __call__( - self, - request: bigtable_table_admin.CopyBackupRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operations_pb2.Operation: - r"""Call the copy backup method over HTTP. - - Args: - request (~.bigtable_table_admin.CopyBackupRequest): - The request object. The request for - [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. 
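# --- Illustrative sketch (not part of the generated diff): because each RPC in this
# --- transport raises core_exceptions.from_http_response(...) for any HTTP status
# --- >= 400, callers can handle failures through the standard google.api_core
# --- exception hierarchy. ``client`` is a hypothetical admin client built on this transport.
from google.api_core import exceptions as core_exceptions

try:
    result = client.get_table(name="projects/p/instances/i/tables/t")
except core_exceptions.NotFound:
    # A 404 from the REST endpoint surfaces as NotFound.
    result = None
except core_exceptions.GoogleAPICallError as exc:
    # Any other >= 400 status maps to a GoogleAPICallError subclass.
    raise RuntimeError(f"table admin call failed: {exc}") from exc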
- - """ - - http_options = ( - _BaseBigtableTableAdminRestTransport._BaseCopyBackup._get_http_options() - ) - - request, metadata = self._interceptor.pre_copy_backup(request, metadata) - transcoded_request = _BaseBigtableTableAdminRestTransport._BaseCopyBackup._get_transcoded_request( - http_options, request - ) - - body = _BaseBigtableTableAdminRestTransport._BaseCopyBackup._get_request_body_json( - transcoded_request - ) - - # Jsonify the query params - query_params = _BaseBigtableTableAdminRestTransport._BaseCopyBackup._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = json_format.MessageToJson(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.CopyBackup", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "CopyBackup", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = BigtableTableAdminRestTransport._CopyBackup._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_copy_backup(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_copy_backup_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = json_format.MessageToJson(resp) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.copy_backup", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "CopyBackup", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _CreateAuthorizedView( - _BaseBigtableTableAdminRestTransport._BaseCreateAuthorizedView, - BigtableTableAdminRestStub, - ): - def __hash__(self): - return hash("BigtableTableAdminRestTransport.CreateAuthorizedView") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - return response - - def __call__( - self, - request: bigtable_table_admin.CreateAuthorizedViewRequest, - *, - retry: OptionalRetry = 
gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operations_pb2.Operation: - r"""Call the create authorized view method over HTTP. - - Args: - request (~.bigtable_table_admin.CreateAuthorizedViewRequest): - The request object. The request for - [CreateAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.CreateAuthorizedView] - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. - - """ - - http_options = ( - _BaseBigtableTableAdminRestTransport._BaseCreateAuthorizedView._get_http_options() - ) - - request, metadata = self._interceptor.pre_create_authorized_view( - request, metadata - ) - transcoded_request = _BaseBigtableTableAdminRestTransport._BaseCreateAuthorizedView._get_transcoded_request( - http_options, request - ) - - body = _BaseBigtableTableAdminRestTransport._BaseCreateAuthorizedView._get_request_body_json( - transcoded_request - ) - - # Jsonify the query params - query_params = _BaseBigtableTableAdminRestTransport._BaseCreateAuthorizedView._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = json_format.MessageToJson(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.CreateAuthorizedView", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "CreateAuthorizedView", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = ( - BigtableTableAdminRestTransport._CreateAuthorizedView._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - body, - ) - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_create_authorized_view(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_create_authorized_view_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = json_format.MessageToJson(resp) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.create_authorized_view", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "CreateAuthorizedView", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _CreateBackup( - _BaseBigtableTableAdminRestTransport._BaseCreateBackup, - BigtableTableAdminRestStub, - ): - def __hash__(self): - return hash("BigtableTableAdminRestTransport.CreateBackup") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - return response - - def __call__( - self, - request: bigtable_table_admin.CreateBackupRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operations_pb2.Operation: - r"""Call the create backup method over HTTP. - - Args: - request (~.bigtable_table_admin.CreateBackupRequest): - The request object. The request for - [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. 
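# --- Illustrative sketch (not part of the generated diff): RPCs such as CopyBackup and
# --- CreateBackup return a raw operations_pb2.Operation; one way to wait for completion
# --- is to poll it through the transport's operations_client property shown earlier.
# --- This helper is an assumption for the example, not generated code.
import time

from google.longrunning import operations_pb2


def wait_for_operation(transport, operation: operations_pb2.Operation, poll_seconds: float = 2.0):
    # Re-fetch the operation by resource name until the server marks it done.
    while not operation.done:
        time.sleep(poll_seconds)
        operation = transport.operations_client.get_operation(name=operation.name)
    return operation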
- - """ - - http_options = ( - _BaseBigtableTableAdminRestTransport._BaseCreateBackup._get_http_options() - ) - - request, metadata = self._interceptor.pre_create_backup(request, metadata) - transcoded_request = _BaseBigtableTableAdminRestTransport._BaseCreateBackup._get_transcoded_request( - http_options, request - ) - - body = _BaseBigtableTableAdminRestTransport._BaseCreateBackup._get_request_body_json( - transcoded_request - ) - - # Jsonify the query params - query_params = _BaseBigtableTableAdminRestTransport._BaseCreateBackup._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = json_format.MessageToJson(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.CreateBackup", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "CreateBackup", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = BigtableTableAdminRestTransport._CreateBackup._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_create_backup(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_create_backup_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = json_format.MessageToJson(resp) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.create_backup", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "CreateBackup", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _CreateSchemaBundle( - _BaseBigtableTableAdminRestTransport._BaseCreateSchemaBundle, - BigtableTableAdminRestStub, - ): - def __hash__(self): - return hash("BigtableTableAdminRestTransport.CreateSchemaBundle") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - return response - - def __call__( - self, - request: bigtable_table_admin.CreateSchemaBundleRequest, - *, - retry: 
OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operations_pb2.Operation: - r"""Call the create schema bundle method over HTTP. - - Args: - request (~.bigtable_table_admin.CreateSchemaBundleRequest): - The request object. The request for - [CreateSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.CreateSchemaBundle]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. - - """ - - http_options = ( - _BaseBigtableTableAdminRestTransport._BaseCreateSchemaBundle._get_http_options() - ) - - request, metadata = self._interceptor.pre_create_schema_bundle( - request, metadata - ) - transcoded_request = _BaseBigtableTableAdminRestTransport._BaseCreateSchemaBundle._get_transcoded_request( - http_options, request - ) - - body = _BaseBigtableTableAdminRestTransport._BaseCreateSchemaBundle._get_request_body_json( - transcoded_request - ) - - # Jsonify the query params - query_params = _BaseBigtableTableAdminRestTransport._BaseCreateSchemaBundle._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = json_format.MessageToJson(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.CreateSchemaBundle", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "CreateSchemaBundle", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = ( - BigtableTableAdminRestTransport._CreateSchemaBundle._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - body, - ) - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_create_schema_bundle(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_create_schema_bundle_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = json_format.MessageToJson(resp) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.create_schema_bundle", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "CreateSchemaBundle", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _CreateTable( - _BaseBigtableTableAdminRestTransport._BaseCreateTable, - BigtableTableAdminRestStub, - ): - def __hash__(self): - return hash("BigtableTableAdminRestTransport.CreateTable") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - return response - - def __call__( - self, - request: bigtable_table_admin.CreateTableRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> gba_table.Table: - r"""Call the create table method over HTTP. - - Args: - request (~.bigtable_table_admin.CreateTableRequest): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.gba_table.Table: - A collection of user data indexed by - row, column, and timestamp. Each table - is served using the resources of its - parent cluster. 
- - """ - - http_options = ( - _BaseBigtableTableAdminRestTransport._BaseCreateTable._get_http_options() - ) - - request, metadata = self._interceptor.pre_create_table(request, metadata) - transcoded_request = _BaseBigtableTableAdminRestTransport._BaseCreateTable._get_transcoded_request( - http_options, request - ) - - body = _BaseBigtableTableAdminRestTransport._BaseCreateTable._get_request_body_json( - transcoded_request - ) - - # Jsonify the query params - query_params = _BaseBigtableTableAdminRestTransport._BaseCreateTable._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = type(request).to_json(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.CreateTable", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "CreateTable", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = BigtableTableAdminRestTransport._CreateTable._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = gba_table.Table() - pb_resp = gba_table.Table.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_create_table(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_create_table_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = gba_table.Table.to_json(response) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.create_table", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "CreateTable", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _CreateTableFromSnapshot( - _BaseBigtableTableAdminRestTransport._BaseCreateTableFromSnapshot, - BigtableTableAdminRestStub, - ): - def __hash__(self): - return hash("BigtableTableAdminRestTransport.CreateTableFromSnapshot") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - return response - - def __call__( - self, - request: 
bigtable_table_admin.CreateTableFromSnapshotRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operations_pb2.Operation: - r"""Call the create table from - snapshot method over HTTP. - - Args: - request (~.bigtable_table_admin.CreateTableFromSnapshotRequest): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot] - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. - - """ - - http_options = ( - _BaseBigtableTableAdminRestTransport._BaseCreateTableFromSnapshot._get_http_options() - ) - - request, metadata = self._interceptor.pre_create_table_from_snapshot( - request, metadata - ) - transcoded_request = _BaseBigtableTableAdminRestTransport._BaseCreateTableFromSnapshot._get_transcoded_request( - http_options, request - ) - - body = _BaseBigtableTableAdminRestTransport._BaseCreateTableFromSnapshot._get_request_body_json( - transcoded_request - ) - - # Jsonify the query params - query_params = _BaseBigtableTableAdminRestTransport._BaseCreateTableFromSnapshot._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = json_format.MessageToJson(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.CreateTableFromSnapshot", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "CreateTableFromSnapshot", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = ( - BigtableTableAdminRestTransport._CreateTableFromSnapshot._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - body, - ) - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
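CreateTableFromSnapshot is a long-running operation: the handler above returns a raw operations_pb2.Operation, which the generated client wraps in a google.api_core.operation.Operation that can be polled for the resulting table. A minimal sketch with placeholder resource names (noting, per the docstring above, that Bigtable snapshots are a private alpha feature)::

    from google.cloud import bigtable_admin_v2

    client = bigtable_admin_v2.BigtableTableAdminClient()

    operation = client.create_table_from_snapshot(
        parent="projects/my-project/instances/my-instance",
        table_id="restored-table",
        source_snapshot=(
            "projects/my-project/instances/my-instance"
            "/clusters/my-cluster/snapshots/my-snapshot"
        ),
    )

    # result() blocks until the server finishes the restore and returns the
    # Table message packed in the operation's response field.
    table = operation.result(timeout=600)
    print(table.name)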
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_create_table_from_snapshot(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_create_table_from_snapshot_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = json_format.MessageToJson(resp) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.create_table_from_snapshot", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "CreateTableFromSnapshot", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _DeleteAuthorizedView( - _BaseBigtableTableAdminRestTransport._BaseDeleteAuthorizedView, - BigtableTableAdminRestStub, - ): - def __hash__(self): - return hash("BigtableTableAdminRestTransport.DeleteAuthorizedView") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - return response - - def __call__( - self, - request: bigtable_table_admin.DeleteAuthorizedViewRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ): - r"""Call the delete authorized view method over HTTP. - - Args: - request (~.bigtable_table_admin.DeleteAuthorizedViewRequest): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.DeleteAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.DeleteAuthorizedView] - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. 
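The metadata parameter documented above is available on every generated method: plain keys carry str values, while keys ending in `-bin` must carry bytes, and the REST handlers above forward the pairs as request headers. A small sketch with hypothetical header names and placeholder resource names::

    from google.cloud import bigtable_admin_v2

    client = bigtable_admin_v2.BigtableTableAdminClient()

    client.delete_authorized_view(
        name=(
            "projects/my-project/instances/my-instance"
            "/tables/my-table/authorizedViews/my-view"
        ),
        metadata=[
            ("x-example-label", "nightly-cleanup"),  # str value for a plain key
            ("x-example-trace-bin", b"\x0a\x0b"),    # bytes value for a -bin key
        ],
    )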
- """ - - http_options = ( - _BaseBigtableTableAdminRestTransport._BaseDeleteAuthorizedView._get_http_options() - ) - - request, metadata = self._interceptor.pre_delete_authorized_view( - request, metadata - ) - transcoded_request = _BaseBigtableTableAdminRestTransport._BaseDeleteAuthorizedView._get_transcoded_request( - http_options, request - ) - - # Jsonify the query params - query_params = _BaseBigtableTableAdminRestTransport._BaseDeleteAuthorizedView._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = json_format.MessageToJson(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.DeleteAuthorizedView", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "DeleteAuthorizedView", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = ( - BigtableTableAdminRestTransport._DeleteAuthorizedView._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - ) - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - class _DeleteBackup( - _BaseBigtableTableAdminRestTransport._BaseDeleteBackup, - BigtableTableAdminRestStub, - ): - def __hash__(self): - return hash("BigtableTableAdminRestTransport.DeleteBackup") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - return response - - def __call__( - self, - request: bigtable_table_admin.DeleteBackupRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ): - r"""Call the delete backup method over HTTP. - - Args: - request (~.bigtable_table_admin.DeleteBackupRequest): - The request object. The request for - [DeleteBackup][google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. 
- """ - - http_options = ( - _BaseBigtableTableAdminRestTransport._BaseDeleteBackup._get_http_options() - ) - - request, metadata = self._interceptor.pre_delete_backup(request, metadata) - transcoded_request = _BaseBigtableTableAdminRestTransport._BaseDeleteBackup._get_transcoded_request( - http_options, request - ) - - # Jsonify the query params - query_params = _BaseBigtableTableAdminRestTransport._BaseDeleteBackup._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = json_format.MessageToJson(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.DeleteBackup", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "DeleteBackup", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = BigtableTableAdminRestTransport._DeleteBackup._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - class _DeleteSchemaBundle( - _BaseBigtableTableAdminRestTransport._BaseDeleteSchemaBundle, - BigtableTableAdminRestStub, - ): - def __hash__(self): - return hash("BigtableTableAdminRestTransport.DeleteSchemaBundle") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - return response - - def __call__( - self, - request: bigtable_table_admin.DeleteSchemaBundleRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ): - r"""Call the delete schema bundle method over HTTP. - - Args: - request (~.bigtable_table_admin.DeleteSchemaBundleRequest): - The request object. The request for - [DeleteSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSchemaBundle]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. 
- """ - - http_options = ( - _BaseBigtableTableAdminRestTransport._BaseDeleteSchemaBundle._get_http_options() - ) - - request, metadata = self._interceptor.pre_delete_schema_bundle( - request, metadata - ) - transcoded_request = _BaseBigtableTableAdminRestTransport._BaseDeleteSchemaBundle._get_transcoded_request( - http_options, request - ) - - # Jsonify the query params - query_params = _BaseBigtableTableAdminRestTransport._BaseDeleteSchemaBundle._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = json_format.MessageToJson(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.DeleteSchemaBundle", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "DeleteSchemaBundle", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = ( - BigtableTableAdminRestTransport._DeleteSchemaBundle._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - ) - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - class _DeleteSnapshot( - _BaseBigtableTableAdminRestTransport._BaseDeleteSnapshot, - BigtableTableAdminRestStub, - ): - def __hash__(self): - return hash("BigtableTableAdminRestTransport.DeleteSnapshot") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - return response - - def __call__( - self, - request: bigtable_table_admin.DeleteSnapshotRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ): - r"""Call the delete snapshot method over HTTP. - - Args: - request (~.bigtable_table_admin.DeleteSnapshotRequest): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. 
Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - """ - - http_options = ( - _BaseBigtableTableAdminRestTransport._BaseDeleteSnapshot._get_http_options() - ) - - request, metadata = self._interceptor.pre_delete_snapshot(request, metadata) - transcoded_request = _BaseBigtableTableAdminRestTransport._BaseDeleteSnapshot._get_transcoded_request( - http_options, request - ) - - # Jsonify the query params - query_params = _BaseBigtableTableAdminRestTransport._BaseDeleteSnapshot._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = json_format.MessageToJson(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.DeleteSnapshot", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "DeleteSnapshot", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = BigtableTableAdminRestTransport._DeleteSnapshot._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - class _DeleteTable( - _BaseBigtableTableAdminRestTransport._BaseDeleteTable, - BigtableTableAdminRestStub, - ): - def __hash__(self): - return hash("BigtableTableAdminRestTransport.DeleteTable") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - return response - - def __call__( - self, - request: bigtable_table_admin.DeleteTableRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ): - r"""Call the delete table method over HTTP. - - Args: - request (~.bigtable_table_admin.DeleteTableRequest): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. 
- """ - - http_options = ( - _BaseBigtableTableAdminRestTransport._BaseDeleteTable._get_http_options() - ) - - request, metadata = self._interceptor.pre_delete_table(request, metadata) - transcoded_request = _BaseBigtableTableAdminRestTransport._BaseDeleteTable._get_transcoded_request( - http_options, request - ) - - # Jsonify the query params - query_params = _BaseBigtableTableAdminRestTransport._BaseDeleteTable._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = json_format.MessageToJson(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.DeleteTable", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "DeleteTable", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = BigtableTableAdminRestTransport._DeleteTable._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - class _DropRowRange( - _BaseBigtableTableAdminRestTransport._BaseDropRowRange, - BigtableTableAdminRestStub, - ): - def __hash__(self): - return hash("BigtableTableAdminRestTransport.DropRowRange") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - return response - - def __call__( - self, - request: bigtable_table_admin.DropRowRangeRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ): - r"""Call the drop row range method over HTTP. - - Args: - request (~.bigtable_table_admin.DropRowRangeRequest): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. 
- """ - - http_options = ( - _BaseBigtableTableAdminRestTransport._BaseDropRowRange._get_http_options() - ) - - request, metadata = self._interceptor.pre_drop_row_range(request, metadata) - transcoded_request = _BaseBigtableTableAdminRestTransport._BaseDropRowRange._get_transcoded_request( - http_options, request - ) - - body = _BaseBigtableTableAdminRestTransport._BaseDropRowRange._get_request_body_json( - transcoded_request - ) - - # Jsonify the query params - query_params = _BaseBigtableTableAdminRestTransport._BaseDropRowRange._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = json_format.MessageToJson(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.DropRowRange", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "DropRowRange", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = BigtableTableAdminRestTransport._DropRowRange._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - class _GenerateConsistencyToken( - _BaseBigtableTableAdminRestTransport._BaseGenerateConsistencyToken, - BigtableTableAdminRestStub, - ): - def __hash__(self): - return hash("BigtableTableAdminRestTransport.GenerateConsistencyToken") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - return response - - def __call__( - self, - request: bigtable_table_admin.GenerateConsistencyTokenRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> bigtable_table_admin.GenerateConsistencyTokenResponse: - r"""Call the generate consistency - token method over HTTP. - - Args: - request (~.bigtable_table_admin.GenerateConsistencyTokenRequest): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. 
- - Returns: - ~.bigtable_table_admin.GenerateConsistencyTokenResponse: - Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] - - """ - - http_options = ( - _BaseBigtableTableAdminRestTransport._BaseGenerateConsistencyToken._get_http_options() - ) - - request, metadata = self._interceptor.pre_generate_consistency_token( - request, metadata - ) - transcoded_request = _BaseBigtableTableAdminRestTransport._BaseGenerateConsistencyToken._get_transcoded_request( - http_options, request - ) - - body = _BaseBigtableTableAdminRestTransport._BaseGenerateConsistencyToken._get_request_body_json( - transcoded_request - ) - - # Jsonify the query params - query_params = _BaseBigtableTableAdminRestTransport._BaseGenerateConsistencyToken._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = type(request).to_json(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.GenerateConsistencyToken", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "GenerateConsistencyToken", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = ( - BigtableTableAdminRestTransport._GenerateConsistencyToken._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - body, - ) - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = bigtable_table_admin.GenerateConsistencyTokenResponse() - pb_resp = bigtable_table_admin.GenerateConsistencyTokenResponse.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_generate_consistency_token(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_generate_consistency_token_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = ( - bigtable_table_admin.GenerateConsistencyTokenResponse.to_json( - response - ) - ) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.generate_consistency_token", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "GenerateConsistencyToken", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _GetAuthorizedView( - _BaseBigtableTableAdminRestTransport._BaseGetAuthorizedView, - BigtableTableAdminRestStub, - ): - def __hash__(self): - return hash("BigtableTableAdminRestTransport.GetAuthorizedView") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - return response - - def __call__( - self, - request: bigtable_table_admin.GetAuthorizedViewRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> table.AuthorizedView: - r"""Call the get authorized view method over HTTP. - - Args: - request (~.bigtable_table_admin.GetAuthorizedViewRequest): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.GetAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.GetAuthorizedView] - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.table.AuthorizedView: - AuthorizedViews represent subsets of - a particular Cloud Bigtable table. Users - can configure access to each Authorized - View independently from the table and - use the existing Data APIs to access the - subset of data. 
- - """ - - http_options = ( - _BaseBigtableTableAdminRestTransport._BaseGetAuthorizedView._get_http_options() - ) - - request, metadata = self._interceptor.pre_get_authorized_view( - request, metadata - ) - transcoded_request = _BaseBigtableTableAdminRestTransport._BaseGetAuthorizedView._get_transcoded_request( - http_options, request - ) - - # Jsonify the query params - query_params = _BaseBigtableTableAdminRestTransport._BaseGetAuthorizedView._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = type(request).to_json(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.GetAuthorizedView", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "GetAuthorizedView", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = BigtableTableAdminRestTransport._GetAuthorizedView._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = table.AuthorizedView() - pb_resp = table.AuthorizedView.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_get_authorized_view(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_get_authorized_view_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = table.AuthorizedView.to_json(response) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.get_authorized_view", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "GetAuthorizedView", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _GetBackup( - _BaseBigtableTableAdminRestTransport._BaseGetBackup, BigtableTableAdminRestStub - ): - def __hash__(self): - return hash("BigtableTableAdminRestTransport.GetBackup") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - return response - - def __call__( - self, - request: bigtable_table_admin.GetBackupRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] 
= None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> table.Backup: - r"""Call the get backup method over HTTP. - - Args: - request (~.bigtable_table_admin.GetBackupRequest): - The request object. The request for - [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.table.Backup: - A backup of a Cloud Bigtable table. - """ - - http_options = ( - _BaseBigtableTableAdminRestTransport._BaseGetBackup._get_http_options() - ) - - request, metadata = self._interceptor.pre_get_backup(request, metadata) - transcoded_request = _BaseBigtableTableAdminRestTransport._BaseGetBackup._get_transcoded_request( - http_options, request - ) - - # Jsonify the query params - query_params = _BaseBigtableTableAdminRestTransport._BaseGetBackup._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = type(request).to_json(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.GetBackup", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "GetBackup", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = BigtableTableAdminRestTransport._GetBackup._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
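Immediately after this point the handler raises core_exceptions.from_http_response(response) for any 4xx/5xx status, so callers never see raw HTTP errors; they see google.api_core exception subclasses. A minimal sketch with a placeholder backup name::

    from google.api_core import exceptions as core_exceptions
    from google.cloud import bigtable_admin_v2

    client = bigtable_admin_v2.BigtableTableAdminClient()
    backup_name = (
        "projects/my-project/instances/my-instance"
        "/clusters/my-cluster/backups/my-backup"
    )

    try:
        backup = client.get_backup(name=backup_name)
    except core_exceptions.NotFound:
        backup = None  # a 404 from the REST endpoint surfaces as NotFound
    except core_exceptions.GoogleAPICallError as exc:
        # Any other mapped HTTP error (403, 429, 500, ...) lands here.
        raise RuntimeError(f"GetBackup failed: {exc}") from exc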
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = table.Backup() - pb_resp = table.Backup.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_get_backup(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_get_backup_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = table.Backup.to_json(response) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.get_backup", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "GetBackup", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _GetIamPolicy( - _BaseBigtableTableAdminRestTransport._BaseGetIamPolicy, - BigtableTableAdminRestStub, - ): - def __hash__(self): - return hash("BigtableTableAdminRestTransport.GetIamPolicy") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - return response - - def __call__( - self, - request: iam_policy_pb2.GetIamPolicyRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. - - Args: - request (~.iam_policy_pb2.GetIamPolicyRequest): - The request object. Request message for ``GetIamPolicy`` method. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.policy_pb2.Policy: - An Identity and Access Management (IAM) policy, which - specifies access controls for Google Cloud resources. - - A ``Policy`` is a collection of ``bindings``. A - ``binding`` binds one or more ``members``, or - principals, to a single ``role``. Principals can be user - accounts, service accounts, Google groups, and domains - (such as G Suite). A ``role`` is a named list of - permissions; each ``role`` can be an IAM predefined role - or a user-created custom role. - - For some types of Google Cloud resources, a ``binding`` - can also specify a ``condition``, which is a logical - expression that allows access to a resource only if the - expression evaluates to ``true``. A condition can add - constraints based on attributes of the request, the - resource, or both. 
To learn which resources support - conditions in their IAM policies, see the `IAM - documentation `__. - - **JSON example:** - - :: - - { - "bindings": [ - { - "role": "roles/resourcemanager.organizationAdmin", - "members": [ - "user:mike@example.com", - "group:admins@example.com", - "domain:google.com", - "serviceAccount:my-project-id@appspot.gserviceaccount.com" - ] - }, - { - "role": "roles/resourcemanager.organizationViewer", - "members": [ - "user:eve@example.com" - ], - "condition": { - "title": "expirable access", - "description": "Does not grant access after Sep 2020", - "expression": "request.time < - timestamp('2020-10-01T00:00:00.000Z')", - } - } - ], - "etag": "BwWWja0YfJA=", - "version": 3 - } - - **YAML example:** - - :: - - bindings: - - members: - - user:mike@example.com - - group:admins@example.com - - domain:google.com - - serviceAccount:my-project-id@appspot.gserviceaccount.com - role: roles/resourcemanager.organizationAdmin - - members: - - user:eve@example.com - role: roles/resourcemanager.organizationViewer - condition: - title: expirable access - description: Does not grant access after Sep 2020 - expression: request.time < timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: 3 - - For a description of IAM and its features, see the `IAM - documentation `__. - - """ - - http_options = ( - _BaseBigtableTableAdminRestTransport._BaseGetIamPolicy._get_http_options() - ) - - request, metadata = self._interceptor.pre_get_iam_policy(request, metadata) - transcoded_request = _BaseBigtableTableAdminRestTransport._BaseGetIamPolicy._get_transcoded_request( - http_options, request - ) - - body = _BaseBigtableTableAdminRestTransport._BaseGetIamPolicy._get_request_body_json( - transcoded_request - ) - - # Jsonify the query params - query_params = _BaseBigtableTableAdminRestTransport._BaseGetIamPolicy._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = json_format.MessageToJson(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.GetIamPolicy", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "GetIamPolicy", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = BigtableTableAdminRestTransport._GetIamPolicy._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
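At the client surface the GetIamPolicy handler above is reached through get_iam_policy, which returns the policy_pb2.Policy whose bindings are described in the docstring. A minimal sketch with a placeholder table resource::

    from google.cloud import bigtable_admin_v2
    from google.iam.v1 import iam_policy_pb2

    client = bigtable_admin_v2.BigtableTableAdminClient()

    policy = client.get_iam_policy(
        request=iam_policy_pb2.GetIamPolicyRequest(
            resource="projects/my-project/instances/my-instance/tables/my-table"
        )
    )
    for binding in policy.bindings:
        print(binding.role, list(binding.members))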
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = policy_pb2.Policy() - pb_resp = resp - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_get_iam_policy(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_get_iam_policy_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = json_format.MessageToJson(resp) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.get_iam_policy", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "GetIamPolicy", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _GetSchemaBundle( - _BaseBigtableTableAdminRestTransport._BaseGetSchemaBundle, - BigtableTableAdminRestStub, - ): - def __hash__(self): - return hash("BigtableTableAdminRestTransport.GetSchemaBundle") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - return response - - def __call__( - self, - request: bigtable_table_admin.GetSchemaBundleRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> table.SchemaBundle: - r"""Call the get schema bundle method over HTTP. - - Args: - request (~.bigtable_table_admin.GetSchemaBundleRequest): - The request object. The request for - [GetSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.GetSchemaBundle]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.table.SchemaBundle: - A named collection of related - schemas. 
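Every handler in this transport guards its request and response logging behind CLIENT_LOGGING_SUPPORTED and a DEBUG-level check on _LOGGER, so surfacing the httpRequest/httpResponse records is a matter of standard logging configuration (recent google-api-core releases can also scope this via the GOOGLE_SDK_PYTHON_LOGGING_SCOPE environment variable; treat that as an assumption about the installed version). A minimal sketch::

    import logging

    # Keep third-party noise down, but let the Bigtable admin transports emit
    # their structured DEBUG records (request URL, headers, payload, status).
    logging.basicConfig(level=logging.WARNING)
    logging.getLogger("google.cloud.bigtable_admin_v2").setLevel(logging.DEBUG)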
- - """ - - http_options = ( - _BaseBigtableTableAdminRestTransport._BaseGetSchemaBundle._get_http_options() - ) - - request, metadata = self._interceptor.pre_get_schema_bundle( - request, metadata - ) - transcoded_request = _BaseBigtableTableAdminRestTransport._BaseGetSchemaBundle._get_transcoded_request( - http_options, request - ) - - # Jsonify the query params - query_params = _BaseBigtableTableAdminRestTransport._BaseGetSchemaBundle._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = type(request).to_json(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.GetSchemaBundle", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "GetSchemaBundle", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = BigtableTableAdminRestTransport._GetSchemaBundle._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = table.SchemaBundle() - pb_resp = table.SchemaBundle.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_get_schema_bundle(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_get_schema_bundle_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = table.SchemaBundle.to_json(response) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.get_schema_bundle", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "GetSchemaBundle", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _GetSnapshot( - _BaseBigtableTableAdminRestTransport._BaseGetSnapshot, - BigtableTableAdminRestStub, - ): - def __hash__(self): - return hash("BigtableTableAdminRestTransport.GetSnapshot") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - return response - - def __call__( - self, - request: bigtable_table_admin.GetSnapshotRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - 
metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> table.Snapshot: - r"""Call the get snapshot method over HTTP. - - Args: - request (~.bigtable_table_admin.GetSnapshotRequest): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.table.Snapshot: - A snapshot of a table at a particular - time. A snapshot can be used as a - checkpoint for data restoration or a - data source for a new table. - - Note: This is a private alpha release of - Cloud Bigtable snapshots. This feature - is not currently available to most Cloud - Bigtable customers. This feature might - be changed in backward-incompatible ways - and is not recommended for production - use. It is not subject to any SLA or - deprecation policy. - - """ - - http_options = ( - _BaseBigtableTableAdminRestTransport._BaseGetSnapshot._get_http_options() - ) - - request, metadata = self._interceptor.pre_get_snapshot(request, metadata) - transcoded_request = _BaseBigtableTableAdminRestTransport._BaseGetSnapshot._get_transcoded_request( - http_options, request - ) - - # Jsonify the query params - query_params = _BaseBigtableTableAdminRestTransport._BaseGetSnapshot._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = type(request).to_json(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.GetSnapshot", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "GetSnapshot", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = BigtableTableAdminRestTransport._GetSnapshot._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
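Each handler consults pre_* and post_* hooks on self._interceptor, which makes the REST transport extensible without touching generated code. A sketch of a custom interceptor, assuming the BigtableTableAdminRestInterceptor base class and the transport's interceptor argument defined earlier in this module; the hook signatures mirror the calls visible above::

    from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.rest import (
        BigtableTableAdminRestInterceptor,
        BigtableTableAdminRestTransport,
    )

    class AuditInterceptor(BigtableTableAdminRestInterceptor):
        """Hypothetical interceptor that tags outgoing GetTable calls."""

        def pre_get_table(self, request, metadata):
            # pre_* hooks may rewrite the request and metadata before transcoding.
            return request, tuple(metadata) + (("x-example-caller", "audit-job"),)

        def post_get_table(self, response):
            # post_* hooks see the parsed proto before it is handed back to the caller.
            return response

    transport = BigtableTableAdminRestTransport(interceptor=AuditInterceptor())

The transport instance can then be passed to the admin client's transport argument so that every GetTable call flows through these hooks.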
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = table.Snapshot() - pb_resp = table.Snapshot.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_get_snapshot(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_get_snapshot_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = table.Snapshot.to_json(response) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.get_snapshot", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "GetSnapshot", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _GetTable( - _BaseBigtableTableAdminRestTransport._BaseGetTable, BigtableTableAdminRestStub - ): - def __hash__(self): - return hash("BigtableTableAdminRestTransport.GetTable") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - return response - - def __call__( - self, - request: bigtable_table_admin.GetTableRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> table.Table: - r"""Call the get table method over HTTP. - - Args: - request (~.bigtable_table_admin.GetTableRequest): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.table.Table: - A collection of user data indexed by - row, column, and timestamp. Each table - is served using the resources of its - parent cluster. 
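GetTable accepts a view that controls how much of the Table message described above is populated; requesting only the schema keeps responses small. A minimal sketch, assuming placeholder names::

    from google.cloud import bigtable_admin_v2

    client = bigtable_admin_v2.BigtableTableAdminClient()

    table = client.get_table(
        request=bigtable_admin_v2.GetTableRequest(
            name="projects/my-project/instances/my-instance/tables/my-table",
            view=bigtable_admin_v2.Table.View.SCHEMA_VIEW,  # column families only
        )
    )
    for family_id, family in table.column_families.items():
        print(family_id, family.gc_rule)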
- - """ - - http_options = ( - _BaseBigtableTableAdminRestTransport._BaseGetTable._get_http_options() - ) - - request, metadata = self._interceptor.pre_get_table(request, metadata) - transcoded_request = _BaseBigtableTableAdminRestTransport._BaseGetTable._get_transcoded_request( - http_options, request - ) - - # Jsonify the query params - query_params = _BaseBigtableTableAdminRestTransport._BaseGetTable._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = type(request).to_json(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.GetTable", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "GetTable", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = BigtableTableAdminRestTransport._GetTable._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = table.Table() - pb_resp = table.Table.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_get_table(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_get_table_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = table.Table.to_json(response) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.get_table", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "GetTable", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _ListAuthorizedViews( - _BaseBigtableTableAdminRestTransport._BaseListAuthorizedViews, - BigtableTableAdminRestStub, - ): - def __hash__(self): - return hash("BigtableTableAdminRestTransport.ListAuthorizedViews") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - return response - - def __call__( - self, - request: bigtable_table_admin.ListAuthorizedViewsRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> 
bigtable_table_admin.ListAuthorizedViewsResponse: - r"""Call the list authorized views method over HTTP. - - Args: - request (~.bigtable_table_admin.ListAuthorizedViewsRequest): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews][google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews] - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.bigtable_table_admin.ListAuthorizedViewsResponse: - Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews][google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews] - - """ - - http_options = ( - _BaseBigtableTableAdminRestTransport._BaseListAuthorizedViews._get_http_options() - ) - - request, metadata = self._interceptor.pre_list_authorized_views( - request, metadata - ) - transcoded_request = _BaseBigtableTableAdminRestTransport._BaseListAuthorizedViews._get_transcoded_request( - http_options, request - ) - - # Jsonify the query params - query_params = _BaseBigtableTableAdminRestTransport._BaseListAuthorizedViews._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = type(request).to_json(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.ListAuthorizedViews", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "ListAuthorizedViews", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = ( - BigtableTableAdminRestTransport._ListAuthorizedViews._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - ) - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
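# A minimal usage sketch (not from the generated module): iterating authorized views with
# the pager returned by the client, assuming a google.cloud.bigtable_admin_v2 version that
# exposes list_authorized_views. Resource names are placeholders.
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient(transport="rest")
parent = client.table_path("my-project", "my-instance", "my-table")
for view in client.list_authorized_views(parent=parent):
    print(view.name)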
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = bigtable_table_admin.ListAuthorizedViewsResponse() - pb_resp = bigtable_table_admin.ListAuthorizedViewsResponse.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_list_authorized_views(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_list_authorized_views_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = ( - bigtable_table_admin.ListAuthorizedViewsResponse.to_json( - response - ) - ) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.list_authorized_views", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "ListAuthorizedViews", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _ListBackups( - _BaseBigtableTableAdminRestTransport._BaseListBackups, - BigtableTableAdminRestStub, - ): - def __hash__(self): - return hash("BigtableTableAdminRestTransport.ListBackups") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - return response - - def __call__( - self, - request: bigtable_table_admin.ListBackupsRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> bigtable_table_admin.ListBackupsResponse: - r"""Call the list backups method over HTTP. - - Args: - request (~.bigtable_table_admin.ListBackupsRequest): - The request object. The request for - [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.bigtable_table_admin.ListBackupsResponse: - The response for - [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. 
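# A minimal usage sketch (not from the generated module): listing the backups stored on a
# cluster via the pager returned by list_backups. Assumes the standard
# google.cloud.bigtable_admin_v2 surface; resource names are placeholders.
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient(transport="rest")
parent = client.cluster_path("my-project", "my-instance", "my-cluster")
for backup in client.list_backups(parent=parent):
    print(backup.name, backup.expire_time)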
- - """ - - http_options = ( - _BaseBigtableTableAdminRestTransport._BaseListBackups._get_http_options() - ) - - request, metadata = self._interceptor.pre_list_backups(request, metadata) - transcoded_request = _BaseBigtableTableAdminRestTransport._BaseListBackups._get_transcoded_request( - http_options, request - ) - - # Jsonify the query params - query_params = _BaseBigtableTableAdminRestTransport._BaseListBackups._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = type(request).to_json(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.ListBackups", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "ListBackups", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = BigtableTableAdminRestTransport._ListBackups._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = bigtable_table_admin.ListBackupsResponse() - pb_resp = bigtable_table_admin.ListBackupsResponse.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_list_backups(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_list_backups_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = bigtable_table_admin.ListBackupsResponse.to_json( - response - ) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.list_backups", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "ListBackups", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _ListSchemaBundles( - _BaseBigtableTableAdminRestTransport._BaseListSchemaBundles, - BigtableTableAdminRestStub, - ): - def __hash__(self): - return hash("BigtableTableAdminRestTransport.ListSchemaBundles") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - return response - - def __call__( - self, - request: bigtable_table_admin.ListSchemaBundlesRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - 
timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> bigtable_table_admin.ListSchemaBundlesResponse: - r"""Call the list schema bundles method over HTTP. - - Args: - request (~.bigtable_table_admin.ListSchemaBundlesRequest): - The request object. The request for - [ListSchemaBundles][google.bigtable.admin.v2.BigtableTableAdmin.ListSchemaBundles]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.bigtable_table_admin.ListSchemaBundlesResponse: - The response for - [ListSchemaBundles][google.bigtable.admin.v2.BigtableTableAdmin.ListSchemaBundles]. - - """ - - http_options = ( - _BaseBigtableTableAdminRestTransport._BaseListSchemaBundles._get_http_options() - ) - - request, metadata = self._interceptor.pre_list_schema_bundles( - request, metadata - ) - transcoded_request = _BaseBigtableTableAdminRestTransport._BaseListSchemaBundles._get_transcoded_request( - http_options, request - ) - - # Jsonify the query params - query_params = _BaseBigtableTableAdminRestTransport._BaseListSchemaBundles._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = type(request).to_json(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.ListSchemaBundles", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "ListSchemaBundles", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = BigtableTableAdminRestTransport._ListSchemaBundles._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
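# A minimal usage sketch (not from the generated module): listing schema bundles under a
# table. Schema bundles are a newer resource, so this assumes a client version that
# exposes list_schema_bundles; resource names are placeholders.
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient(transport="rest")
parent = client.table_path("my-project", "my-instance", "my-table")
for bundle in client.list_schema_bundles(parent=parent):
    print(bundle.name)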
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = bigtable_table_admin.ListSchemaBundlesResponse() - pb_resp = bigtable_table_admin.ListSchemaBundlesResponse.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_list_schema_bundles(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_list_schema_bundles_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = ( - bigtable_table_admin.ListSchemaBundlesResponse.to_json(response) - ) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.list_schema_bundles", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "ListSchemaBundles", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _ListSnapshots( - _BaseBigtableTableAdminRestTransport._BaseListSnapshots, - BigtableTableAdminRestStub, - ): - def __hash__(self): - return hash("BigtableTableAdminRestTransport.ListSnapshots") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - return response - - def __call__( - self, - request: bigtable_table_admin.ListSnapshotsRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> bigtable_table_admin.ListSnapshotsResponse: - r"""Call the list snapshots method over HTTP. - - Args: - request (~.bigtable_table_admin.ListSnapshotsRequest): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.bigtable_table_admin.ListSnapshotsResponse: - Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] - - Note: This is a private alpha release of Cloud Bigtable - snapshots. 
This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - - """ - - http_options = ( - _BaseBigtableTableAdminRestTransport._BaseListSnapshots._get_http_options() - ) - - request, metadata = self._interceptor.pre_list_snapshots(request, metadata) - transcoded_request = _BaseBigtableTableAdminRestTransport._BaseListSnapshots._get_transcoded_request( - http_options, request - ) - - # Jsonify the query params - query_params = _BaseBigtableTableAdminRestTransport._BaseListSnapshots._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = type(request).to_json(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.ListSnapshots", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "ListSnapshots", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = BigtableTableAdminRestTransport._ListSnapshots._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = bigtable_table_admin.ListSnapshotsResponse() - pb_resp = bigtable_table_admin.ListSnapshotsResponse.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_list_snapshots(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_list_snapshots_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = ( - bigtable_table_admin.ListSnapshotsResponse.to_json(response) - ) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.list_snapshots", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "ListSnapshots", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _ListTables( - _BaseBigtableTableAdminRestTransport._BaseListTables, BigtableTableAdminRestStub - ): - def __hash__(self): - return hash("BigtableTableAdminRestTransport.ListTables") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - 
headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - return response - - def __call__( - self, - request: bigtable_table_admin.ListTablesRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> bigtable_table_admin.ListTablesResponse: - r"""Call the list tables method over HTTP. - - Args: - request (~.bigtable_table_admin.ListTablesRequest): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.bigtable_table_admin.ListTablesResponse: - Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] - - """ - - http_options = ( - _BaseBigtableTableAdminRestTransport._BaseListTables._get_http_options() - ) - - request, metadata = self._interceptor.pre_list_tables(request, metadata) - transcoded_request = _BaseBigtableTableAdminRestTransport._BaseListTables._get_transcoded_request( - http_options, request - ) - - # Jsonify the query params - query_params = _BaseBigtableTableAdminRestTransport._BaseListTables._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = type(request).to_json(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.ListTables", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "ListTables", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = BigtableTableAdminRestTransport._ListTables._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
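# A minimal usage sketch (not from the generated module): listing tables in an instance
# over the REST transport, which drives the _ListTables stub shown here. Assumes the
# standard google.cloud.bigtable_admin_v2 surface; resource names are placeholders.
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient(transport="rest")
request = bigtable_admin_v2.ListTablesRequest(
    parent=client.instance_path("my-project", "my-instance"),
    view=bigtable_admin_v2.Table.View.NAME_ONLY,
)
for tbl in client.list_tables(request=request):
    print(tbl.name)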
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = bigtable_table_admin.ListTablesResponse() - pb_resp = bigtable_table_admin.ListTablesResponse.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_list_tables(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_list_tables_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = bigtable_table_admin.ListTablesResponse.to_json( - response - ) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.list_tables", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "ListTables", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _ModifyColumnFamilies( - _BaseBigtableTableAdminRestTransport._BaseModifyColumnFamilies, - BigtableTableAdminRestStub, - ): - def __hash__(self): - return hash("BigtableTableAdminRestTransport.ModifyColumnFamilies") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - return response - - def __call__( - self, - request: bigtable_table_admin.ModifyColumnFamiliesRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> table.Table: - r"""Call the modify column families method over HTTP. - - Args: - request (~.bigtable_table_admin.ModifyColumnFamiliesRequest): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.table.Table: - A collection of user data indexed by - row, column, and timestamp. Each table - is served using the resources of its - parent cluster. 
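# A minimal usage sketch (not from the generated module): creating one column family and
# updating another in a single ModifyColumnFamilies call. Assumes the standard
# google.cloud.bigtable_admin_v2 surface; names and GC settings are placeholders.
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient(transport="rest")
Modification = bigtable_admin_v2.ModifyColumnFamiliesRequest.Modification
updated = client.modify_column_families(
    name=client.table_path("my-project", "my-instance", "my-table"),
    modifications=[
        Modification(id="cf_new", create=bigtable_admin_v2.ColumnFamily()),
        Modification(
            id="cf_existing",
            update=bigtable_admin_v2.ColumnFamily(
                gc_rule=bigtable_admin_v2.GcRule(max_num_versions=3)
            ),
        ),
    ],
)
print(sorted(updated.column_families))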
- - """ - - http_options = ( - _BaseBigtableTableAdminRestTransport._BaseModifyColumnFamilies._get_http_options() - ) - - request, metadata = self._interceptor.pre_modify_column_families( - request, metadata - ) - transcoded_request = _BaseBigtableTableAdminRestTransport._BaseModifyColumnFamilies._get_transcoded_request( - http_options, request - ) - - body = _BaseBigtableTableAdminRestTransport._BaseModifyColumnFamilies._get_request_body_json( - transcoded_request - ) - - # Jsonify the query params - query_params = _BaseBigtableTableAdminRestTransport._BaseModifyColumnFamilies._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = type(request).to_json(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.ModifyColumnFamilies", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "ModifyColumnFamilies", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = ( - BigtableTableAdminRestTransport._ModifyColumnFamilies._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - body, - ) - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = table.Table() - pb_resp = table.Table.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_modify_column_families(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_modify_column_families_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = table.Table.to_json(response) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.modify_column_families", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "ModifyColumnFamilies", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _RestoreTable( - _BaseBigtableTableAdminRestTransport._BaseRestoreTable, - BigtableTableAdminRestStub, - ): - def __hash__(self): - return hash("BigtableTableAdminRestTransport.RestoreTable") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - 
return response - - def __call__( - self, - request: bigtable_table_admin.RestoreTableRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operations_pb2.Operation: - r"""Call the restore table method over HTTP. - - Args: - request (~.bigtable_table_admin.RestoreTableRequest): - The request object. The request for - [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. - - """ - - http_options = ( - _BaseBigtableTableAdminRestTransport._BaseRestoreTable._get_http_options() - ) - - request, metadata = self._interceptor.pre_restore_table(request, metadata) - transcoded_request = _BaseBigtableTableAdminRestTransport._BaseRestoreTable._get_transcoded_request( - http_options, request - ) - - body = _BaseBigtableTableAdminRestTransport._BaseRestoreTable._get_request_body_json( - transcoded_request - ) - - # Jsonify the query params - query_params = _BaseBigtableTableAdminRestTransport._BaseRestoreTable._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = json_format.MessageToJson(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.RestoreTable", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "RestoreTable", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = BigtableTableAdminRestTransport._RestoreTable._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
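# A minimal usage sketch (not from the generated module): restoring a table from a backup.
# RestoreTable is a long-running operation, so the client returns an Operation future.
# Assumes the standard google.cloud.bigtable_admin_v2 surface; names are placeholders.
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient(transport="rest")
operation = client.restore_table(
    request=bigtable_admin_v2.RestoreTableRequest(
        parent=client.instance_path("my-project", "my-instance"),
        table_id="restored-table",
        backup=client.backup_path("my-project", "my-instance", "my-cluster", "my-backup"),
    )
)
restored = operation.result(timeout=600)  # blocks until the restore completes
print(restored.name)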
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_restore_table(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_restore_table_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = json_format.MessageToJson(resp) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.restore_table", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "RestoreTable", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _SetIamPolicy( - _BaseBigtableTableAdminRestTransport._BaseSetIamPolicy, - BigtableTableAdminRestStub, - ): - def __hash__(self): - return hash("BigtableTableAdminRestTransport.SetIamPolicy") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - return response - - def __call__( - self, - request: iam_policy_pb2.SetIamPolicyRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. - - Args: - request (~.iam_policy_pb2.SetIamPolicyRequest): - The request object. Request message for ``SetIamPolicy`` method. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.policy_pb2.Policy: - An Identity and Access Management (IAM) policy, which - specifies access controls for Google Cloud resources. - - A ``Policy`` is a collection of ``bindings``. A - ``binding`` binds one or more ``members``, or - principals, to a single ``role``. Principals can be user - accounts, service accounts, Google groups, and domains - (such as G Suite). A ``role`` is a named list of - permissions; each ``role`` can be an IAM predefined role - or a user-created custom role. - - For some types of Google Cloud resources, a ``binding`` - can also specify a ``condition``, which is a logical - expression that allows access to a resource only if the - expression evaluates to ``true``. A condition can add - constraints based on attributes of the request, the - resource, or both. To learn which resources support - conditions in their IAM policies, see the `IAM - documentation `__. 
- - **JSON example:** - - :: - - { - "bindings": [ - { - "role": "roles/resourcemanager.organizationAdmin", - "members": [ - "user:mike@example.com", - "group:admins@example.com", - "domain:google.com", - "serviceAccount:my-project-id@appspot.gserviceaccount.com" - ] - }, - { - "role": "roles/resourcemanager.organizationViewer", - "members": [ - "user:eve@example.com" - ], - "condition": { - "title": "expirable access", - "description": "Does not grant access after Sep 2020", - "expression": "request.time < - timestamp('2020-10-01T00:00:00.000Z')", - } - } - ], - "etag": "BwWWja0YfJA=", - "version": 3 - } - - **YAML example:** - - :: - - bindings: - - members: - - user:mike@example.com - - group:admins@example.com - - domain:google.com - - serviceAccount:my-project-id@appspot.gserviceaccount.com - role: roles/resourcemanager.organizationAdmin - - members: - - user:eve@example.com - role: roles/resourcemanager.organizationViewer - condition: - title: expirable access - description: Does not grant access after Sep 2020 - expression: request.time < timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: 3 - - For a description of IAM and its features, see the `IAM - documentation `__. - - """ - - http_options = ( - _BaseBigtableTableAdminRestTransport._BaseSetIamPolicy._get_http_options() - ) - - request, metadata = self._interceptor.pre_set_iam_policy(request, metadata) - transcoded_request = _BaseBigtableTableAdminRestTransport._BaseSetIamPolicy._get_transcoded_request( - http_options, request - ) - - body = _BaseBigtableTableAdminRestTransport._BaseSetIamPolicy._get_request_body_json( - transcoded_request - ) - - # Jsonify the query params - query_params = _BaseBigtableTableAdminRestTransport._BaseSetIamPolicy._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = json_format.MessageToJson(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.SetIamPolicy", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "SetIamPolicy", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = BigtableTableAdminRestTransport._SetIamPolicy._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
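# A minimal usage sketch (not from the generated module): reading a table's IAM policy,
# adding a binding, and writing it back with SetIamPolicy. Assumes the standard
# google.cloud.bigtable_admin_v2 surface; the principal and role are placeholders.
from google.cloud import bigtable_admin_v2
from google.iam.v1 import iam_policy_pb2

client = bigtable_admin_v2.BigtableTableAdminClient(transport="rest")
resource = client.table_path("my-project", "my-instance", "my-table")
policy = client.get_iam_policy(
    request=iam_policy_pb2.GetIamPolicyRequest(resource=resource)
)
policy.bindings.add(role="roles/bigtable.reader", members=["user:someone@example.com"])
updated = client.set_iam_policy(
    request=iam_policy_pb2.SetIamPolicyRequest(resource=resource, policy=policy)
)
print(updated.etag)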
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = policy_pb2.Policy() - pb_resp = resp - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_set_iam_policy(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_set_iam_policy_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = json_format.MessageToJson(resp) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.set_iam_policy", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "SetIamPolicy", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _SnapshotTable( - _BaseBigtableTableAdminRestTransport._BaseSnapshotTable, - BigtableTableAdminRestStub, - ): - def __hash__(self): - return hash("BigtableTableAdminRestTransport.SnapshotTable") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - return response - - def __call__( - self, - request: bigtable_table_admin.SnapshotTableRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operations_pb2.Operation: - r"""Call the snapshot table method over HTTP. - - Args: - request (~.bigtable_table_admin.SnapshotTableRequest): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable] - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. 
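# A minimal usage sketch (not from the generated module): snapshotting a table into a
# cluster. The docstring above notes this is a private-alpha feature, so availability
# depends on the project; the call returns a long-running operation. Names are placeholders.
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient(transport="rest")
operation = client.snapshot_table(
    name=client.table_path("my-project", "my-instance", "my-table"),
    cluster=client.cluster_path("my-project", "my-instance", "my-cluster"),
    snapshot_id="my-snapshot",
    description="illustrative snapshot",
)
snapshot = operation.result(timeout=600)
print(snapshot.name, snapshot.data_size_bytes)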
- - """ - - http_options = ( - _BaseBigtableTableAdminRestTransport._BaseSnapshotTable._get_http_options() - ) - - request, metadata = self._interceptor.pre_snapshot_table(request, metadata) - transcoded_request = _BaseBigtableTableAdminRestTransport._BaseSnapshotTable._get_transcoded_request( - http_options, request - ) - - body = _BaseBigtableTableAdminRestTransport._BaseSnapshotTable._get_request_body_json( - transcoded_request - ) - - # Jsonify the query params - query_params = _BaseBigtableTableAdminRestTransport._BaseSnapshotTable._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = json_format.MessageToJson(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.SnapshotTable", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "SnapshotTable", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = BigtableTableAdminRestTransport._SnapshotTable._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_snapshot_table(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_snapshot_table_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = json_format.MessageToJson(resp) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.snapshot_table", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "SnapshotTable", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _TestIamPermissions( - _BaseBigtableTableAdminRestTransport._BaseTestIamPermissions, - BigtableTableAdminRestStub, - ): - def __hash__(self): - return hash("BigtableTableAdminRestTransport.TestIamPermissions") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - return response - - def __call__( - self, - request: iam_policy_pb2.TestIamPermissionsRequest, - *, - 
retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. - - Args: - request (~.iam_policy_pb2.TestIamPermissionsRequest): - The request object. Request message for ``TestIamPermissions`` method. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.iam_policy_pb2.TestIamPermissionsResponse: - Response message for ``TestIamPermissions`` method. - """ - - http_options = ( - _BaseBigtableTableAdminRestTransport._BaseTestIamPermissions._get_http_options() - ) - - request, metadata = self._interceptor.pre_test_iam_permissions( - request, metadata - ) - transcoded_request = _BaseBigtableTableAdminRestTransport._BaseTestIamPermissions._get_transcoded_request( - http_options, request - ) - - body = _BaseBigtableTableAdminRestTransport._BaseTestIamPermissions._get_request_body_json( - transcoded_request - ) - - # Jsonify the query params - query_params = _BaseBigtableTableAdminRestTransport._BaseTestIamPermissions._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = json_format.MessageToJson(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.TestIamPermissions", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "TestIamPermissions", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = ( - BigtableTableAdminRestTransport._TestIamPermissions._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - body, - ) - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
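# A minimal usage sketch (not from the generated module): checking which of the requested
# permissions the caller holds on a table. Assumes the standard
# google.cloud.bigtable_admin_v2 surface; resource names are placeholders.
from google.cloud import bigtable_admin_v2
from google.iam.v1 import iam_policy_pb2

client = bigtable_admin_v2.BigtableTableAdminClient(transport="rest")
response = client.test_iam_permissions(
    request=iam_policy_pb2.TestIamPermissionsRequest(
        resource=client.table_path("my-project", "my-instance", "my-table"),
        permissions=["bigtable.tables.readRows", "bigtable.tables.mutateRows"],
    )
)
print(list(response.permissions))  # the subset of requested permissions that are granted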
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = iam_policy_pb2.TestIamPermissionsResponse() - pb_resp = resp - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_test_iam_permissions(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_test_iam_permissions_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = json_format.MessageToJson(resp) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.test_iam_permissions", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "TestIamPermissions", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _UndeleteTable( - _BaseBigtableTableAdminRestTransport._BaseUndeleteTable, - BigtableTableAdminRestStub, - ): - def __hash__(self): - return hash("BigtableTableAdminRestTransport.UndeleteTable") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - return response - - def __call__( - self, - request: bigtable_table_admin.UndeleteTableRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operations_pb2.Operation: - r"""Call the undelete table method over HTTP. - - Args: - request (~.bigtable_table_admin.UndeleteTableRequest): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable] - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. 
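# A minimal usage sketch (not from the generated module): undeleting a recently deleted
# table. UndeleteTable returns a long-running operation whose result is the restored
# Table. Assumes the standard google.cloud.bigtable_admin_v2 surface; names are placeholders.
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient(transport="rest")
operation = client.undelete_table(
    name=client.table_path("my-project", "my-instance", "my-table")
)
print(operation.result(timeout=600).name)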
- - """ - - http_options = ( - _BaseBigtableTableAdminRestTransport._BaseUndeleteTable._get_http_options() - ) - - request, metadata = self._interceptor.pre_undelete_table(request, metadata) - transcoded_request = _BaseBigtableTableAdminRestTransport._BaseUndeleteTable._get_transcoded_request( - http_options, request - ) - - body = _BaseBigtableTableAdminRestTransport._BaseUndeleteTable._get_request_body_json( - transcoded_request - ) - - # Jsonify the query params - query_params = _BaseBigtableTableAdminRestTransport._BaseUndeleteTable._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = json_format.MessageToJson(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.UndeleteTable", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "UndeleteTable", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = BigtableTableAdminRestTransport._UndeleteTable._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_undelete_table(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_undelete_table_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = json_format.MessageToJson(resp) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.undelete_table", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "UndeleteTable", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _UpdateAuthorizedView( - _BaseBigtableTableAdminRestTransport._BaseUpdateAuthorizedView, - BigtableTableAdminRestStub, - ): - def __hash__(self): - return hash("BigtableTableAdminRestTransport.UpdateAuthorizedView") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - return response - - def __call__( - self, - request: 
bigtable_table_admin.UpdateAuthorizedViewRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operations_pb2.Operation: - r"""Call the update authorized view method over HTTP. - - Args: - request (~.bigtable_table_admin.UpdateAuthorizedViewRequest): - The request object. The request for - [UpdateAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.UpdateAuthorizedView]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. - - """ - - http_options = ( - _BaseBigtableTableAdminRestTransport._BaseUpdateAuthorizedView._get_http_options() - ) - - request, metadata = self._interceptor.pre_update_authorized_view( - request, metadata - ) - transcoded_request = _BaseBigtableTableAdminRestTransport._BaseUpdateAuthorizedView._get_transcoded_request( - http_options, request - ) - - body = _BaseBigtableTableAdminRestTransport._BaseUpdateAuthorizedView._get_request_body_json( - transcoded_request - ) - - # Jsonify the query params - query_params = _BaseBigtableTableAdminRestTransport._BaseUpdateAuthorizedView._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = json_format.MessageToJson(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.UpdateAuthorizedView", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "UpdateAuthorizedView", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = ( - BigtableTableAdminRestTransport._UpdateAuthorizedView._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - body, - ) - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
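# A minimal usage sketch (not from the generated module): toggling deletion protection on
# an authorized view via an update mask. Assumes a google.cloud.bigtable_admin_v2 version
# that exposes authorized views; the resource name is a placeholder.
from google.cloud import bigtable_admin_v2
from google.protobuf import field_mask_pb2

client = bigtable_admin_v2.BigtableTableAdminClient(transport="rest")
view = bigtable_admin_v2.AuthorizedView(
    name="projects/my-project/instances/my-instance/tables/my-table/authorizedViews/my-view",
    deletion_protection=True,
)
operation = client.update_authorized_view(
    authorized_view=view,
    update_mask=field_mask_pb2.FieldMask(paths=["deletion_protection"]),
)
print(operation.result(timeout=600).name)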
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_update_authorized_view(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_update_authorized_view_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = json_format.MessageToJson(resp) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.update_authorized_view", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "UpdateAuthorizedView", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _UpdateBackup( - _BaseBigtableTableAdminRestTransport._BaseUpdateBackup, - BigtableTableAdminRestStub, - ): - def __hash__(self): - return hash("BigtableTableAdminRestTransport.UpdateBackup") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - return response - - def __call__( - self, - request: bigtable_table_admin.UpdateBackupRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> table.Backup: - r"""Call the update backup method over HTTP. - - Args: - request (~.bigtable_table_admin.UpdateBackupRequest): - The request object. The request for - [UpdateBackup][google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.table.Backup: - A backup of a Cloud Bigtable table. 
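# A minimal usage sketch (not from the generated module): extending a backup's expiry with
# UpdateBackup and a field mask. Assumes the standard google.cloud.bigtable_admin_v2
# surface; resource names are placeholders.
import datetime

from google.cloud import bigtable_admin_v2
from google.protobuf import field_mask_pb2

client = bigtable_admin_v2.BigtableTableAdminClient(transport="rest")
backup = bigtable_admin_v2.Backup(
    name=client.backup_path("my-project", "my-instance", "my-cluster", "my-backup"),
    expire_time=datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(days=14),
)
updated = client.update_backup(
    backup=backup,
    update_mask=field_mask_pb2.FieldMask(paths=["expire_time"]),
)
print(updated.expire_time)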
- """ - - http_options = ( - _BaseBigtableTableAdminRestTransport._BaseUpdateBackup._get_http_options() - ) - - request, metadata = self._interceptor.pre_update_backup(request, metadata) - transcoded_request = _BaseBigtableTableAdminRestTransport._BaseUpdateBackup._get_transcoded_request( - http_options, request - ) - - body = _BaseBigtableTableAdminRestTransport._BaseUpdateBackup._get_request_body_json( - transcoded_request - ) - - # Jsonify the query params - query_params = _BaseBigtableTableAdminRestTransport._BaseUpdateBackup._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = type(request).to_json(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.UpdateBackup", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "UpdateBackup", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = BigtableTableAdminRestTransport._UpdateBackup._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = table.Backup() - pb_resp = table.Backup.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_update_backup(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_update_backup_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = table.Backup.to_json(response) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.update_backup", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "UpdateBackup", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _UpdateSchemaBundle( - _BaseBigtableTableAdminRestTransport._BaseUpdateSchemaBundle, - BigtableTableAdminRestStub, - ): - def __hash__(self): - return hash("BigtableTableAdminRestTransport.UpdateSchemaBundle") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - return response - - def __call__( - self, - request: 
bigtable_table_admin.UpdateSchemaBundleRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operations_pb2.Operation: - r"""Call the update schema bundle method over HTTP. - - Args: - request (~.bigtable_table_admin.UpdateSchemaBundleRequest): - The request object. The request for - [UpdateSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.UpdateSchemaBundle]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. - - """ - - http_options = ( - _BaseBigtableTableAdminRestTransport._BaseUpdateSchemaBundle._get_http_options() - ) - - request, metadata = self._interceptor.pre_update_schema_bundle( - request, metadata - ) - transcoded_request = _BaseBigtableTableAdminRestTransport._BaseUpdateSchemaBundle._get_transcoded_request( - http_options, request - ) - - body = _BaseBigtableTableAdminRestTransport._BaseUpdateSchemaBundle._get_request_body_json( - transcoded_request - ) - - # Jsonify the query params - query_params = _BaseBigtableTableAdminRestTransport._BaseUpdateSchemaBundle._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = json_format.MessageToJson(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.UpdateSchemaBundle", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "UpdateSchemaBundle", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = ( - BigtableTableAdminRestTransport._UpdateSchemaBundle._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - body, - ) - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_update_schema_bundle(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_update_schema_bundle_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = json_format.MessageToJson(resp) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.update_schema_bundle", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "UpdateSchemaBundle", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - class _UpdateTable( - _BaseBigtableTableAdminRestTransport._BaseUpdateTable, - BigtableTableAdminRestStub, - ): - def __hash__(self): - return hash("BigtableTableAdminRestTransport.UpdateTable") - - @staticmethod - def _get_response( - host, - metadata, - query_params, - session, - timeout, - transcoded_request, - body=None, - ): - uri = transcoded_request["uri"] - method = transcoded_request["method"] - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(session, method)( - "{host}{uri}".format(host=host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - return response - - def __call__( - self, - request: bigtable_table_admin.UpdateTableRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operations_pb2.Operation: - r"""Call the update table method over HTTP. - - Args: - request (~.bigtable_table_admin.UpdateTableRequest): - The request object. The request for - [UpdateTable][google.bigtable.admin.v2.BigtableTableAdmin.UpdateTable]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be - sent along with the request as metadata. Normally, each value must be of type `str`, - but for metadata keys ending with the suffix `-bin`, the corresponding values must - be of type `bytes`. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. 
- - """ - - http_options = ( - _BaseBigtableTableAdminRestTransport._BaseUpdateTable._get_http_options() - ) - - request, metadata = self._interceptor.pre_update_table(request, metadata) - transcoded_request = _BaseBigtableTableAdminRestTransport._BaseUpdateTable._get_transcoded_request( - http_options, request - ) - - body = _BaseBigtableTableAdminRestTransport._BaseUpdateTable._get_request_body_json( - transcoded_request - ) - - # Jsonify the query params - query_params = _BaseBigtableTableAdminRestTransport._BaseUpdateTable._get_query_params_json( - transcoded_request - ) - - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - try: - request_payload = json_format.MessageToJson(request) - except: - request_payload = None - http_request = { - "payload": request_payload, - "requestMethod": method, - "requestUrl": request_url, - "headers": dict(metadata), - } - _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.UpdateTable", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "UpdateTable", - "httpRequest": http_request, - "metadata": http_request["headers"], - }, - ) - - # Send the request - response = BigtableTableAdminRestTransport._UpdateTable._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - - resp = self._interceptor.post_update_table(resp) - response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_update_table_with_metadata( - resp, response_metadata - ) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = json_format.MessageToJson(resp) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": response.status_code, - } - _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.update_table", - extra={ - "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "UpdateTable", - "metadata": http_response["headers"], - "httpResponse": http_response, - }, - ) - return resp - - @property - def check_consistency( - self, - ) -> Callable[ - [bigtable_table_admin.CheckConsistencyRequest], - bigtable_table_admin.CheckConsistencyResponse, - ]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._CheckConsistency(self._session, self._host, self._interceptor) # type: ignore - - @property - def copy_backup( - self, - ) -> Callable[[bigtable_table_admin.CopyBackupRequest], operations_pb2.Operation]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
- # In C++ this would require a dynamic_cast - return self._CopyBackup(self._session, self._host, self._interceptor) # type: ignore - - @property - def create_authorized_view( - self, - ) -> Callable[ - [bigtable_table_admin.CreateAuthorizedViewRequest], operations_pb2.Operation - ]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._CreateAuthorizedView(self._session, self._host, self._interceptor) # type: ignore - - @property - def create_backup( - self, - ) -> Callable[[bigtable_table_admin.CreateBackupRequest], operations_pb2.Operation]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._CreateBackup(self._session, self._host, self._interceptor) # type: ignore - - @property - def create_schema_bundle( - self, - ) -> Callable[ - [bigtable_table_admin.CreateSchemaBundleRequest], operations_pb2.Operation - ]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._CreateSchemaBundle(self._session, self._host, self._interceptor) # type: ignore - - @property - def create_table( - self, - ) -> Callable[[bigtable_table_admin.CreateTableRequest], gba_table.Table]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._CreateTable(self._session, self._host, self._interceptor) # type: ignore - - @property - def create_table_from_snapshot( - self, - ) -> Callable[ - [bigtable_table_admin.CreateTableFromSnapshotRequest], operations_pb2.Operation - ]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._CreateTableFromSnapshot(self._session, self._host, self._interceptor) # type: ignore - - @property - def delete_authorized_view( - self, - ) -> Callable[[bigtable_table_admin.DeleteAuthorizedViewRequest], empty_pb2.Empty]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._DeleteAuthorizedView(self._session, self._host, self._interceptor) # type: ignore - - @property - def delete_backup( - self, - ) -> Callable[[bigtable_table_admin.DeleteBackupRequest], empty_pb2.Empty]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._DeleteBackup(self._session, self._host, self._interceptor) # type: ignore - - @property - def delete_schema_bundle( - self, - ) -> Callable[[bigtable_table_admin.DeleteSchemaBundleRequest], empty_pb2.Empty]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._DeleteSchemaBundle(self._session, self._host, self._interceptor) # type: ignore - - @property - def delete_snapshot( - self, - ) -> Callable[[bigtable_table_admin.DeleteSnapshotRequest], empty_pb2.Empty]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
- # In C++ this would require a dynamic_cast - return self._DeleteSnapshot(self._session, self._host, self._interceptor) # type: ignore - - @property - def delete_table( - self, - ) -> Callable[[bigtable_table_admin.DeleteTableRequest], empty_pb2.Empty]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._DeleteTable(self._session, self._host, self._interceptor) # type: ignore - - @property - def drop_row_range( - self, - ) -> Callable[[bigtable_table_admin.DropRowRangeRequest], empty_pb2.Empty]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._DropRowRange(self._session, self._host, self._interceptor) # type: ignore - - @property - def generate_consistency_token( - self, - ) -> Callable[ - [bigtable_table_admin.GenerateConsistencyTokenRequest], - bigtable_table_admin.GenerateConsistencyTokenResponse, - ]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._GenerateConsistencyToken(self._session, self._host, self._interceptor) # type: ignore - - @property - def get_authorized_view( - self, - ) -> Callable[ - [bigtable_table_admin.GetAuthorizedViewRequest], table.AuthorizedView - ]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._GetAuthorizedView(self._session, self._host, self._interceptor) # type: ignore - - @property - def get_backup( - self, - ) -> Callable[[bigtable_table_admin.GetBackupRequest], table.Backup]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._GetBackup(self._session, self._host, self._interceptor) # type: ignore - - @property - def get_iam_policy( - self, - ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._GetIamPolicy(self._session, self._host, self._interceptor) # type: ignore - - @property - def get_schema_bundle( - self, - ) -> Callable[[bigtable_table_admin.GetSchemaBundleRequest], table.SchemaBundle]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._GetSchemaBundle(self._session, self._host, self._interceptor) # type: ignore - - @property - def get_snapshot( - self, - ) -> Callable[[bigtable_table_admin.GetSnapshotRequest], table.Snapshot]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._GetSnapshot(self._session, self._host, self._interceptor) # type: ignore - - @property - def get_table( - self, - ) -> Callable[[bigtable_table_admin.GetTableRequest], table.Table]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
- # In C++ this would require a dynamic_cast - return self._GetTable(self._session, self._host, self._interceptor) # type: ignore - - @property - def list_authorized_views( - self, - ) -> Callable[ - [bigtable_table_admin.ListAuthorizedViewsRequest], - bigtable_table_admin.ListAuthorizedViewsResponse, - ]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._ListAuthorizedViews(self._session, self._host, self._interceptor) # type: ignore - - @property - def list_backups( - self, - ) -> Callable[ - [bigtable_table_admin.ListBackupsRequest], - bigtable_table_admin.ListBackupsResponse, - ]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._ListBackups(self._session, self._host, self._interceptor) # type: ignore - - @property - def list_schema_bundles( - self, - ) -> Callable[ - [bigtable_table_admin.ListSchemaBundlesRequest], - bigtable_table_admin.ListSchemaBundlesResponse, - ]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._ListSchemaBundles(self._session, self._host, self._interceptor) # type: ignore - - @property - def list_snapshots( - self, - ) -> Callable[ - [bigtable_table_admin.ListSnapshotsRequest], - bigtable_table_admin.ListSnapshotsResponse, - ]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._ListSnapshots(self._session, self._host, self._interceptor) # type: ignore - - @property - def list_tables( - self, - ) -> Callable[ - [bigtable_table_admin.ListTablesRequest], - bigtable_table_admin.ListTablesResponse, - ]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._ListTables(self._session, self._host, self._interceptor) # type: ignore - - @property - def modify_column_families( - self, - ) -> Callable[[bigtable_table_admin.ModifyColumnFamiliesRequest], table.Table]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._ModifyColumnFamilies(self._session, self._host, self._interceptor) # type: ignore - - @property - def restore_table( - self, - ) -> Callable[[bigtable_table_admin.RestoreTableRequest], operations_pb2.Operation]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._RestoreTable(self._session, self._host, self._interceptor) # type: ignore - - @property - def set_iam_policy( - self, - ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._SetIamPolicy(self._session, self._host, self._interceptor) # type: ignore - - @property - def snapshot_table( - self, - ) -> Callable[ - [bigtable_table_admin.SnapshotTableRequest], operations_pb2.Operation - ]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
- # In C++ this would require a dynamic_cast - return self._SnapshotTable(self._session, self._host, self._interceptor) # type: ignore - - @property - def test_iam_permissions( - self, - ) -> Callable[ - [iam_policy_pb2.TestIamPermissionsRequest], - iam_policy_pb2.TestIamPermissionsResponse, - ]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._TestIamPermissions(self._session, self._host, self._interceptor) # type: ignore - - @property - def undelete_table( - self, - ) -> Callable[ - [bigtable_table_admin.UndeleteTableRequest], operations_pb2.Operation - ]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._UndeleteTable(self._session, self._host, self._interceptor) # type: ignore - - @property - def update_authorized_view( - self, - ) -> Callable[ - [bigtable_table_admin.UpdateAuthorizedViewRequest], operations_pb2.Operation - ]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._UpdateAuthorizedView(self._session, self._host, self._interceptor) # type: ignore - - @property - def update_backup( - self, - ) -> Callable[[bigtable_table_admin.UpdateBackupRequest], table.Backup]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._UpdateBackup(self._session, self._host, self._interceptor) # type: ignore - - @property - def update_schema_bundle( - self, - ) -> Callable[ - [bigtable_table_admin.UpdateSchemaBundleRequest], operations_pb2.Operation - ]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._UpdateSchemaBundle(self._session, self._host, self._interceptor) # type: ignore - - @property - def update_table( - self, - ) -> Callable[[bigtable_table_admin.UpdateTableRequest], operations_pb2.Operation]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._UpdateTable(self._session, self._host, self._interceptor) # type: ignore - - @property - def kind(self) -> str: - return "rest" - - def close(self): - self._session.close() - - -__all__ = ("BigtableTableAdminRestTransport",) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest_base.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest_base.py deleted file mode 100644 index ef6c2374d..000000000 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest_base.py +++ /dev/null @@ -1,2001 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import json # type: ignore -from google.api_core import path_template -from google.api_core import gapic_v1 - -from google.protobuf import json_format -from .base import BigtableTableAdminTransport, DEFAULT_CLIENT_INFO - -import re -from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union - - -from google.cloud.bigtable_admin_v2.types import bigtable_table_admin -from google.cloud.bigtable_admin_v2.types import table -from google.cloud.bigtable_admin_v2.types import table as gba_table -from google.iam.v1 import iam_policy_pb2 # type: ignore -from google.iam.v1 import policy_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.longrunning import operations_pb2 # type: ignore - - -class _BaseBigtableTableAdminRestTransport(BigtableTableAdminTransport): - """Base REST backend transport for BigtableTableAdmin. - - Note: This class is not meant to be used directly. Use its sync and - async sub-classes instead. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends JSON representations of protocol buffers over HTTP/1.1 - """ - - def __init__( - self, - *, - host: str = "bigtableadmin.googleapis.com", - credentials: Optional[Any] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - url_scheme: str = "https", - api_audience: Optional[str] = None, - ) -> None: - """Instantiate the transport. - Args: - host (Optional[str]): - The hostname to connect to (default: 'bigtableadmin.googleapis.com'). - credentials (Optional[Any]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. 
- """ - # Run the base constructor - maybe_url_match = re.match("^(?Phttp(?:s)?://)?(?P.*)$", host) - if maybe_url_match is None: - raise ValueError( - f"Unexpected hostname structure: {host}" - ) # pragma: NO COVER - - url_match_items = maybe_url_match.groupdict() - - host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host - - super().__init__( - host=host, - credentials=credentials, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - api_audience=api_audience, - ) - - class _BaseCheckConsistency: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{name=projects/*/instances/*/tables/*}:checkConsistency", - "body": "*", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_table_admin.CheckConsistencyRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_request_body_json(transcoded_request): - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True - ) - return body - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableTableAdminRestTransport._BaseCheckConsistency._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseCopyBackup: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{parent=projects/*/instances/*/clusters/*}/backups:copy", - "body": "*", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_table_admin.CopyBackupRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_request_body_json(transcoded_request): - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True - ) - return body - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableTableAdminRestTransport._BaseCopyBackup._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseCreateAuthorizedView: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must 
be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - "authorizedViewId": "", - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{parent=projects/*/instances/*/tables/*}/authorizedViews", - "body": "authorized_view", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_table_admin.CreateAuthorizedViewRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_request_body_json(transcoded_request): - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True - ) - return body - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableTableAdminRestTransport._BaseCreateAuthorizedView._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseCreateBackup: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - "backupId": "", - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{parent=projects/*/instances/*/clusters/*}/backups", - "body": "backup", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_table_admin.CreateBackupRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_request_body_json(transcoded_request): - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True - ) - return body - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableTableAdminRestTransport._BaseCreateBackup._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseCreateSchemaBundle: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - "schemaBundleId": "", - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{parent=projects/*/instances/*/tables/*}/schemaBundles", - "body": "schema_bundle", - }, - ] - return http_options - - @staticmethod - def 
_get_transcoded_request(http_options, request): - pb_request = bigtable_table_admin.CreateSchemaBundleRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_request_body_json(transcoded_request): - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True - ) - return body - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableTableAdminRestTransport._BaseCreateSchemaBundle._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseCreateTable: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{parent=projects/*/instances/*}/tables", - "body": "*", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_table_admin.CreateTableRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_request_body_json(transcoded_request): - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True - ) - return body - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableTableAdminRestTransport._BaseCreateTable._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseCreateTableFromSnapshot: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot", - "body": "*", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_table_admin.CreateTableFromSnapshotRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_request_body_json(transcoded_request): - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True - ) - return body - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) 
- ) - query_params.update( - _BaseBigtableTableAdminRestTransport._BaseCreateTableFromSnapshot._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseDeleteAuthorizedView: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "delete", - "uri": "/v2/{name=projects/*/instances/*/tables/*/authorizedViews/*}", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_table_admin.DeleteAuthorizedViewRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableTableAdminRestTransport._BaseDeleteAuthorizedView._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseDeleteBackup: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "delete", - "uri": "/v2/{name=projects/*/instances/*/clusters/*/backups/*}", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_table_admin.DeleteBackupRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableTableAdminRestTransport._BaseDeleteBackup._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseDeleteSchemaBundle: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "delete", - "uri": "/v2/{name=projects/*/instances/*/tables/*/schemaBundles/*}", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_table_admin.DeleteSchemaBundleRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - 
@staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableTableAdminRestTransport._BaseDeleteSchemaBundle._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseDeleteSnapshot: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "delete", - "uri": "/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_table_admin.DeleteSnapshotRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableTableAdminRestTransport._BaseDeleteSnapshot._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseDeleteTable: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "delete", - "uri": "/v2/{name=projects/*/instances/*/tables/*}", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_table_admin.DeleteTableRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableTableAdminRestTransport._BaseDeleteTable._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseDropRowRange: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{name=projects/*/instances/*/tables/*}:dropRowRange", - "body": "*", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = 
bigtable_table_admin.DropRowRangeRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_request_body_json(transcoded_request): - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True - ) - return body - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableTableAdminRestTransport._BaseDropRowRange._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseGenerateConsistencyToken: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken", - "body": "*", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_table_admin.GenerateConsistencyTokenRequest.pb( - request - ) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_request_body_json(transcoded_request): - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True - ) - return body - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableTableAdminRestTransport._BaseGenerateConsistencyToken._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseGetAuthorizedView: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{name=projects/*/instances/*/tables/*/authorizedViews/*}", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_table_admin.GetAuthorizedViewRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableTableAdminRestTransport._BaseGetAuthorizedView._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseGetBackup: - 
def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{name=projects/*/instances/*/clusters/*/backups/*}", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_table_admin.GetBackupRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableTableAdminRestTransport._BaseGetBackup._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseGetIamPolicy: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy", - "body": "*", - }, - { - "method": "post", - "uri": "/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:getIamPolicy", - "body": "*", - }, - { - "method": "post", - "uri": "/v2/{resource=projects/*/instances/*/tables/*/authorizedViews/*}:getIamPolicy", - "body": "*", - }, - { - "method": "post", - "uri": "/v2/{resource=projects/*/instances/*/tables/*/schemaBundles/*}:getIamPolicy", - "body": "*", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = request - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_request_body_json(transcoded_request): - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True - ) - return body - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableTableAdminRestTransport._BaseGetIamPolicy._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseGetSchemaBundle: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{name=projects/*/instances/*/tables/*/schemaBundles/*}", - }, - ] - 
return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_table_admin.GetSchemaBundleRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableTableAdminRestTransport._BaseGetSchemaBundle._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseGetSnapshot: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_table_admin.GetSnapshotRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableTableAdminRestTransport._BaseGetSnapshot._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseGetTable: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{name=projects/*/instances/*/tables/*}", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_table_admin.GetTableRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableTableAdminRestTransport._BaseGetTable._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseListAuthorizedViews: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] 
= [ - { - "method": "get", - "uri": "/v2/{parent=projects/*/instances/*/tables/*}/authorizedViews", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_table_admin.ListAuthorizedViewsRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableTableAdminRestTransport._BaseListAuthorizedViews._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseListBackups: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{parent=projects/*/instances/*/clusters/*}/backups", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_table_admin.ListBackupsRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableTableAdminRestTransport._BaseListBackups._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseListSchemaBundles: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{parent=projects/*/instances/*/tables/*}/schemaBundles", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_table_admin.ListSchemaBundlesRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableTableAdminRestTransport._BaseListSchemaBundles._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseListSnapshots: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in 
cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{parent=projects/*/instances/*/clusters/*}/snapshots", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_table_admin.ListSnapshotsRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableTableAdminRestTransport._BaseListSnapshots._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseListTables: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{parent=projects/*/instances/*}/tables", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_table_admin.ListTablesRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableTableAdminRestTransport._BaseListTables._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseModifyColumnFamilies: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies", - "body": "*", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_table_admin.ModifyColumnFamiliesRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_request_body_json(transcoded_request): - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True - ) - return body - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableTableAdminRestTransport._BaseModifyColumnFamilies._get_unset_required_fields( - query_params - ) - ) - 
- query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseRestoreTable: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{parent=projects/*/instances/*}/tables:restore", - "body": "*", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_table_admin.RestoreTableRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_request_body_json(transcoded_request): - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True - ) - return body - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableTableAdminRestTransport._BaseRestoreTable._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseSetIamPolicy: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy", - "body": "*", - }, - { - "method": "post", - "uri": "/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:setIamPolicy", - "body": "*", - }, - { - "method": "post", - "uri": "/v2/{resource=projects/*/instances/*/tables/*/authorizedViews/*}:setIamPolicy", - "body": "*", - }, - { - "method": "post", - "uri": "/v2/{resource=projects/*/instances/*/tables/*/schemaBundles/*}:setIamPolicy", - "body": "*", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = request - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_request_body_json(transcoded_request): - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True - ) - return body - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableTableAdminRestTransport._BaseSetIamPolicy._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseSnapshotTable: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def 
_get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{name=projects/*/instances/*/tables/*}:snapshot", - "body": "*", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_table_admin.SnapshotTableRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_request_body_json(transcoded_request): - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True - ) - return body - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableTableAdminRestTransport._BaseSnapshotTable._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseTestIamPermissions: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions", - "body": "*", - }, - { - "method": "post", - "uri": "/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:testIamPermissions", - "body": "*", - }, - { - "method": "post", - "uri": "/v2/{resource=projects/*/instances/*/tables/*/authorizedViews/*}:testIamPermissions", - "body": "*", - }, - { - "method": "post", - "uri": "/v2/{resource=projects/*/instances/*/tables/*/schemaBundles/*}:testIamPermissions", - "body": "*", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = request - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_request_body_json(transcoded_request): - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True - ) - return body - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableTableAdminRestTransport._BaseTestIamPermissions._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseUndeleteTable: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": 
"post", - "uri": "/v2/{name=projects/*/instances/*/tables/*}:undelete", - "body": "*", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_table_admin.UndeleteTableRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_request_body_json(transcoded_request): - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True - ) - return body - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableTableAdminRestTransport._BaseUndeleteTable._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseUpdateAuthorizedView: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "patch", - "uri": "/v2/{authorized_view.name=projects/*/instances/*/tables/*/authorizedViews/*}", - "body": "authorized_view", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_table_admin.UpdateAuthorizedViewRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_request_body_json(transcoded_request): - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True - ) - return body - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableTableAdminRestTransport._BaseUpdateAuthorizedView._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseUpdateBackup: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - "updateMask": {}, - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "patch", - "uri": "/v2/{backup.name=projects/*/instances/*/clusters/*/backups/*}", - "body": "backup", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_table_admin.UpdateBackupRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_request_body_json(transcoded_request): - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True - 
) - return body - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableTableAdminRestTransport._BaseUpdateBackup._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseUpdateSchemaBundle: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "patch", - "uri": "/v2/{schema_bundle.name=projects/*/instances/*/tables/*/schemaBundles/*}", - "body": "schema_bundle", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_table_admin.UpdateSchemaBundleRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_request_body_json(transcoded_request): - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True - ) - return body - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableTableAdminRestTransport._BaseUpdateSchemaBundle._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - class _BaseUpdateTable: - def __hash__(self): # pragma: NO COVER - return NotImplementedError("__hash__ must be implemented.") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - "updateMask": {}, - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - @staticmethod - def _get_http_options(): - http_options: List[Dict[str, str]] = [ - { - "method": "patch", - "uri": "/v2/{table.name=projects/*/instances/*/tables/*}", - "body": "table", - }, - ] - return http_options - - @staticmethod - def _get_transcoded_request(http_options, request): - pb_request = bigtable_table_admin.UpdateTableRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - return transcoded_request - - @staticmethod - def _get_request_body_json(transcoded_request): - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True - ) - return body - - @staticmethod - def _get_query_params_json(transcoded_request): - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) - ) - query_params.update( - _BaseBigtableTableAdminRestTransport._BaseUpdateTable._get_unset_required_fields( - query_params - ) - ) - - query_params["$alt"] = "json;enum-encoding=int" - return query_params - - -__all__ = ("_BaseBigtableTableAdminRestTransport",) diff --git a/google/cloud/bigtable_admin_v2/types/__init__.py 
b/google/cloud/bigtable_admin_v2/types/__init__.py deleted file mode 100644 index e5deb36a1..000000000 --- a/google/cloud/bigtable_admin_v2/types/__init__.py +++ /dev/null @@ -1,268 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from .bigtable_instance_admin import ( - CreateAppProfileRequest, - CreateClusterMetadata, - CreateClusterRequest, - CreateInstanceMetadata, - CreateInstanceRequest, - CreateLogicalViewMetadata, - CreateLogicalViewRequest, - CreateMaterializedViewMetadata, - CreateMaterializedViewRequest, - DeleteAppProfileRequest, - DeleteClusterRequest, - DeleteInstanceRequest, - DeleteLogicalViewRequest, - DeleteMaterializedViewRequest, - GetAppProfileRequest, - GetClusterRequest, - GetInstanceRequest, - GetLogicalViewRequest, - GetMaterializedViewRequest, - ListAppProfilesRequest, - ListAppProfilesResponse, - ListClustersRequest, - ListClustersResponse, - ListHotTabletsRequest, - ListHotTabletsResponse, - ListInstancesRequest, - ListInstancesResponse, - ListLogicalViewsRequest, - ListLogicalViewsResponse, - ListMaterializedViewsRequest, - ListMaterializedViewsResponse, - PartialUpdateClusterMetadata, - PartialUpdateClusterRequest, - PartialUpdateInstanceRequest, - UpdateAppProfileMetadata, - UpdateAppProfileRequest, - UpdateClusterMetadata, - UpdateInstanceMetadata, - UpdateLogicalViewMetadata, - UpdateLogicalViewRequest, - UpdateMaterializedViewMetadata, - UpdateMaterializedViewRequest, -) -from .bigtable_table_admin import ( - CheckConsistencyRequest, - CheckConsistencyResponse, - CopyBackupMetadata, - CopyBackupRequest, - CreateAuthorizedViewMetadata, - CreateAuthorizedViewRequest, - CreateBackupMetadata, - CreateBackupRequest, - CreateSchemaBundleMetadata, - CreateSchemaBundleRequest, - CreateTableFromSnapshotMetadata, - CreateTableFromSnapshotRequest, - CreateTableRequest, - DataBoostReadLocalWrites, - DeleteAuthorizedViewRequest, - DeleteBackupRequest, - DeleteSchemaBundleRequest, - DeleteSnapshotRequest, - DeleteTableRequest, - DropRowRangeRequest, - GenerateConsistencyTokenRequest, - GenerateConsistencyTokenResponse, - GetAuthorizedViewRequest, - GetBackupRequest, - GetSchemaBundleRequest, - GetSnapshotRequest, - GetTableRequest, - ListAuthorizedViewsRequest, - ListAuthorizedViewsResponse, - ListBackupsRequest, - ListBackupsResponse, - ListSchemaBundlesRequest, - ListSchemaBundlesResponse, - ListSnapshotsRequest, - ListSnapshotsResponse, - ListTablesRequest, - ListTablesResponse, - ModifyColumnFamiliesRequest, - OptimizeRestoredTableMetadata, - RestoreTableMetadata, - RestoreTableRequest, - SnapshotTableMetadata, - SnapshotTableRequest, - StandardReadRemoteWrites, - UndeleteTableMetadata, - UndeleteTableRequest, - UpdateAuthorizedViewMetadata, - UpdateAuthorizedViewRequest, - UpdateBackupRequest, - UpdateSchemaBundleMetadata, - UpdateSchemaBundleRequest, - UpdateTableMetadata, - UpdateTableRequest, -) -from .common import ( - OperationProgress, - StorageType, -) -from .instance import ( - AppProfile, - 
AutoscalingLimits, - AutoscalingTargets, - Cluster, - HotTablet, - Instance, - LogicalView, - MaterializedView, -) -from .table import ( - AuthorizedView, - Backup, - BackupInfo, - ChangeStreamConfig, - ColumnFamily, - EncryptionInfo, - GcRule, - ProtoSchema, - RestoreInfo, - SchemaBundle, - Snapshot, - Table, - RestoreSourceType, -) -from .types import ( - Type, -) - -__all__ = ( - "CreateAppProfileRequest", - "CreateClusterMetadata", - "CreateClusterRequest", - "CreateInstanceMetadata", - "CreateInstanceRequest", - "CreateLogicalViewMetadata", - "CreateLogicalViewRequest", - "CreateMaterializedViewMetadata", - "CreateMaterializedViewRequest", - "DeleteAppProfileRequest", - "DeleteClusterRequest", - "DeleteInstanceRequest", - "DeleteLogicalViewRequest", - "DeleteMaterializedViewRequest", - "GetAppProfileRequest", - "GetClusterRequest", - "GetInstanceRequest", - "GetLogicalViewRequest", - "GetMaterializedViewRequest", - "ListAppProfilesRequest", - "ListAppProfilesResponse", - "ListClustersRequest", - "ListClustersResponse", - "ListHotTabletsRequest", - "ListHotTabletsResponse", - "ListInstancesRequest", - "ListInstancesResponse", - "ListLogicalViewsRequest", - "ListLogicalViewsResponse", - "ListMaterializedViewsRequest", - "ListMaterializedViewsResponse", - "PartialUpdateClusterMetadata", - "PartialUpdateClusterRequest", - "PartialUpdateInstanceRequest", - "UpdateAppProfileMetadata", - "UpdateAppProfileRequest", - "UpdateClusterMetadata", - "UpdateInstanceMetadata", - "UpdateLogicalViewMetadata", - "UpdateLogicalViewRequest", - "UpdateMaterializedViewMetadata", - "UpdateMaterializedViewRequest", - "CheckConsistencyRequest", - "CheckConsistencyResponse", - "CopyBackupMetadata", - "CopyBackupRequest", - "CreateAuthorizedViewMetadata", - "CreateAuthorizedViewRequest", - "CreateBackupMetadata", - "CreateBackupRequest", - "CreateSchemaBundleMetadata", - "CreateSchemaBundleRequest", - "CreateTableFromSnapshotMetadata", - "CreateTableFromSnapshotRequest", - "CreateTableRequest", - "DataBoostReadLocalWrites", - "DeleteAuthorizedViewRequest", - "DeleteBackupRequest", - "DeleteSchemaBundleRequest", - "DeleteSnapshotRequest", - "DeleteTableRequest", - "DropRowRangeRequest", - "GenerateConsistencyTokenRequest", - "GenerateConsistencyTokenResponse", - "GetAuthorizedViewRequest", - "GetBackupRequest", - "GetSchemaBundleRequest", - "GetSnapshotRequest", - "GetTableRequest", - "ListAuthorizedViewsRequest", - "ListAuthorizedViewsResponse", - "ListBackupsRequest", - "ListBackupsResponse", - "ListSchemaBundlesRequest", - "ListSchemaBundlesResponse", - "ListSnapshotsRequest", - "ListSnapshotsResponse", - "ListTablesRequest", - "ListTablesResponse", - "ModifyColumnFamiliesRequest", - "OptimizeRestoredTableMetadata", - "RestoreTableMetadata", - "RestoreTableRequest", - "SnapshotTableMetadata", - "SnapshotTableRequest", - "StandardReadRemoteWrites", - "UndeleteTableMetadata", - "UndeleteTableRequest", - "UpdateAuthorizedViewMetadata", - "UpdateAuthorizedViewRequest", - "UpdateBackupRequest", - "UpdateSchemaBundleMetadata", - "UpdateSchemaBundleRequest", - "UpdateTableMetadata", - "UpdateTableRequest", - "OperationProgress", - "StorageType", - "AppProfile", - "AutoscalingLimits", - "AutoscalingTargets", - "Cluster", - "HotTablet", - "Instance", - "LogicalView", - "MaterializedView", - "AuthorizedView", - "Backup", - "BackupInfo", - "ChangeStreamConfig", - "ColumnFamily", - "EncryptionInfo", - "GcRule", - "ProtoSchema", - "RestoreInfo", - "SchemaBundle", - "Snapshot", - "Table", - "RestoreSourceType", - "Type", -) 
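
For context on what the deleted `types/__init__.py` above provided: it re-exported the admin request/response messages so callers could reach them via `google.cloud.bigtable_admin_v2.types`. A minimal sketch of that usage follows; it is illustrative only, assumes the generated `BigtableInstanceAdminClient` surface removed on this branch plus Application Default Credentials, and `my-project` is a placeholder project ID.

    # Illustrative sketch, not part of the patch: how the re-exports from the
    # deleted types/__init__.py were typically consumed. Assumes the generated
    # BigtableInstanceAdminClient removed on this branch and default credentials;
    # "my-project" is a placeholder project ID.
    from google.cloud import bigtable_admin_v2

    client = bigtable_admin_v2.BigtableInstanceAdminClient()

    # Request messages such as ListInstancesRequest are plain proto-plus
    # messages re-exported from google.cloud.bigtable_admin_v2.types.
    request = bigtable_admin_v2.types.ListInstancesRequest(
        parent="projects/my-project",
    )
    response = client.list_instances(request=request)

    for instance in response.instances:
        print(instance.name)
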
diff --git a/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py b/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py deleted file mode 100644 index 4197ed0b7..000000000 --- a/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py +++ /dev/null @@ -1,1364 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - -from google.cloud.bigtable_admin_v2.types import instance as gba_instance -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package="google.bigtable.admin.v2", - manifest={ - "CreateInstanceRequest", - "GetInstanceRequest", - "ListInstancesRequest", - "ListInstancesResponse", - "PartialUpdateInstanceRequest", - "DeleteInstanceRequest", - "CreateClusterRequest", - "GetClusterRequest", - "ListClustersRequest", - "ListClustersResponse", - "DeleteClusterRequest", - "CreateInstanceMetadata", - "UpdateInstanceMetadata", - "CreateClusterMetadata", - "UpdateClusterMetadata", - "PartialUpdateClusterMetadata", - "PartialUpdateClusterRequest", - "CreateAppProfileRequest", - "GetAppProfileRequest", - "ListAppProfilesRequest", - "ListAppProfilesResponse", - "UpdateAppProfileRequest", - "DeleteAppProfileRequest", - "UpdateAppProfileMetadata", - "ListHotTabletsRequest", - "ListHotTabletsResponse", - "CreateLogicalViewRequest", - "CreateLogicalViewMetadata", - "GetLogicalViewRequest", - "ListLogicalViewsRequest", - "ListLogicalViewsResponse", - "UpdateLogicalViewRequest", - "UpdateLogicalViewMetadata", - "DeleteLogicalViewRequest", - "CreateMaterializedViewRequest", - "CreateMaterializedViewMetadata", - "GetMaterializedViewRequest", - "ListMaterializedViewsRequest", - "ListMaterializedViewsResponse", - "UpdateMaterializedViewRequest", - "UpdateMaterializedViewMetadata", - "DeleteMaterializedViewRequest", - }, -) - - -class CreateInstanceRequest(proto.Message): - r"""Request message for BigtableInstanceAdmin.CreateInstance. - - Attributes: - parent (str): - Required. The unique name of the project in which to create - the new instance. Values are of the form - ``projects/{project}``. - instance_id (str): - Required. The ID to be used when referring to the new - instance within its project, e.g., just ``myinstance`` - rather than ``projects/myproject/instances/myinstance``. - instance (google.cloud.bigtable_admin_v2.types.Instance): - Required. The instance to create. Fields marked - ``OutputOnly`` must be left blank. - clusters (MutableMapping[str, google.cloud.bigtable_admin_v2.types.Cluster]): - Required. The clusters to be created within the instance, - mapped by desired cluster ID, e.g., just ``mycluster`` - rather than - ``projects/myproject/instances/myinstance/clusters/mycluster``. - Fields marked ``OutputOnly`` must be left blank. 
- """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - instance_id: str = proto.Field( - proto.STRING, - number=2, - ) - instance: gba_instance.Instance = proto.Field( - proto.MESSAGE, - number=3, - message=gba_instance.Instance, - ) - clusters: MutableMapping[str, gba_instance.Cluster] = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=4, - message=gba_instance.Cluster, - ) - - -class GetInstanceRequest(proto.Message): - r"""Request message for BigtableInstanceAdmin.GetInstance. - - Attributes: - name (str): - Required. The unique name of the requested instance. Values - are of the form ``projects/{project}/instances/{instance}``. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - - -class ListInstancesRequest(proto.Message): - r"""Request message for BigtableInstanceAdmin.ListInstances. - - Attributes: - parent (str): - Required. The unique name of the project for which a list of - instances is requested. Values are of the form - ``projects/{project}``. - page_token (str): - DEPRECATED: This field is unused and ignored. - """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - page_token: str = proto.Field( - proto.STRING, - number=2, - ) - - -class ListInstancesResponse(proto.Message): - r"""Response message for BigtableInstanceAdmin.ListInstances. - - Attributes: - instances (MutableSequence[google.cloud.bigtable_admin_v2.types.Instance]): - The list of requested instances. - failed_locations (MutableSequence[str]): - Locations from which Instance information could not be - retrieved, due to an outage or some other transient - condition. Instances whose Clusters are all in one of the - failed locations may be missing from ``instances``, and - Instances with at least one Cluster in a failed location may - only have partial information returned. Values are of the - form ``projects//locations/`` - next_page_token (str): - DEPRECATED: This field is unused and ignored. - """ - - @property - def raw_page(self): - return self - - instances: MutableSequence[gba_instance.Instance] = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gba_instance.Instance, - ) - failed_locations: MutableSequence[str] = proto.RepeatedField( - proto.STRING, - number=2, - ) - next_page_token: str = proto.Field( - proto.STRING, - number=3, - ) - - -class PartialUpdateInstanceRequest(proto.Message): - r"""Request message for - BigtableInstanceAdmin.PartialUpdateInstance. - - Attributes: - instance (google.cloud.bigtable_admin_v2.types.Instance): - Required. The Instance which will (partially) - replace the current value. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The subset of Instance fields which - should be replaced. Must be explicitly set. - """ - - instance: gba_instance.Instance = proto.Field( - proto.MESSAGE, - number=1, - message=gba_instance.Instance, - ) - update_mask: field_mask_pb2.FieldMask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class DeleteInstanceRequest(proto.Message): - r"""Request message for BigtableInstanceAdmin.DeleteInstance. - - Attributes: - name (str): - Required. The unique name of the instance to be deleted. - Values are of the form - ``projects/{project}/instances/{instance}``. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - - -class CreateClusterRequest(proto.Message): - r"""Request message for BigtableInstanceAdmin.CreateCluster. - - Attributes: - parent (str): - Required. 
The unique name of the instance in which to create - the new cluster. Values are of the form - ``projects/{project}/instances/{instance}``. - cluster_id (str): - Required. The ID to be used when referring to the new - cluster within its instance, e.g., just ``mycluster`` rather - than - ``projects/myproject/instances/myinstance/clusters/mycluster``. - cluster (google.cloud.bigtable_admin_v2.types.Cluster): - Required. The cluster to be created. Fields marked - ``OutputOnly`` must be left blank. - """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - cluster_id: str = proto.Field( - proto.STRING, - number=2, - ) - cluster: gba_instance.Cluster = proto.Field( - proto.MESSAGE, - number=3, - message=gba_instance.Cluster, - ) - - -class GetClusterRequest(proto.Message): - r"""Request message for BigtableInstanceAdmin.GetCluster. - - Attributes: - name (str): - Required. The unique name of the requested cluster. Values - are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - - -class ListClustersRequest(proto.Message): - r"""Request message for BigtableInstanceAdmin.ListClusters. - - Attributes: - parent (str): - Required. The unique name of the instance for which a list - of clusters is requested. Values are of the form - ``projects/{project}/instances/{instance}``. Use - ``{instance} = '-'`` to list Clusters for all Instances in a - project, e.g., ``projects/myproject/instances/-``. - page_token (str): - DEPRECATED: This field is unused and ignored. - """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - page_token: str = proto.Field( - proto.STRING, - number=2, - ) - - -class ListClustersResponse(proto.Message): - r"""Response message for BigtableInstanceAdmin.ListClusters. - - Attributes: - clusters (MutableSequence[google.cloud.bigtable_admin_v2.types.Cluster]): - The list of requested clusters. - failed_locations (MutableSequence[str]): - Locations from which Cluster information could not be - retrieved, due to an outage or some other transient - condition. Clusters from these locations may be missing from - ``clusters``, or may only have partial information returned. - Values are of the form - ``projects//locations/`` - next_page_token (str): - DEPRECATED: This field is unused and ignored. - """ - - @property - def raw_page(self): - return self - - clusters: MutableSequence[gba_instance.Cluster] = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gba_instance.Cluster, - ) - failed_locations: MutableSequence[str] = proto.RepeatedField( - proto.STRING, - number=2, - ) - next_page_token: str = proto.Field( - proto.STRING, - number=3, - ) - - -class DeleteClusterRequest(proto.Message): - r"""Request message for BigtableInstanceAdmin.DeleteCluster. - - Attributes: - name (str): - Required. The unique name of the cluster to be deleted. - Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - - -class CreateInstanceMetadata(proto.Message): - r"""The metadata for the Operation returned by CreateInstance. - - Attributes: - original_request (google.cloud.bigtable_admin_v2.types.CreateInstanceRequest): - The request that prompted the initiation of - this CreateInstance operation. - request_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which the original request was - received. 
- finish_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which the operation failed or was - completed successfully. - """ - - original_request: "CreateInstanceRequest" = proto.Field( - proto.MESSAGE, - number=1, - message="CreateInstanceRequest", - ) - request_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - finish_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - - -class UpdateInstanceMetadata(proto.Message): - r"""The metadata for the Operation returned by UpdateInstance. - - Attributes: - original_request (google.cloud.bigtable_admin_v2.types.PartialUpdateInstanceRequest): - The request that prompted the initiation of - this UpdateInstance operation. - request_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which the original request was - received. - finish_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which the operation failed or was - completed successfully. - """ - - original_request: "PartialUpdateInstanceRequest" = proto.Field( - proto.MESSAGE, - number=1, - message="PartialUpdateInstanceRequest", - ) - request_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - finish_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - - -class CreateClusterMetadata(proto.Message): - r"""The metadata for the Operation returned by CreateCluster. - - Attributes: - original_request (google.cloud.bigtable_admin_v2.types.CreateClusterRequest): - The request that prompted the initiation of - this CreateCluster operation. - request_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which the original request was - received. - finish_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which the operation failed or was - completed successfully. - tables (MutableMapping[str, google.cloud.bigtable_admin_v2.types.CreateClusterMetadata.TableProgress]): - Keys: the full ``name`` of each table that existed in the - instance when CreateCluster was first called, i.e. - ``projects//instances//tables/
``. - Any table added to the instance by a later API call will be - created in the new cluster by that API call, not this one. - - Values: information on how much of a table's data has been - copied to the newly-created cluster so far. - """ - - class TableProgress(proto.Message): - r"""Progress info for copying a table's data to the new cluster. - - Attributes: - estimated_size_bytes (int): - Estimate of the size of the table to be - copied. - estimated_copied_bytes (int): - Estimate of the number of bytes copied so far for this - table. This will eventually reach 'estimated_size_bytes' - unless the table copy is CANCELLED. - state (google.cloud.bigtable_admin_v2.types.CreateClusterMetadata.TableProgress.State): - - """ - - class State(proto.Enum): - r""" - - Values: - STATE_UNSPECIFIED (0): - No description available. - PENDING (1): - The table has not yet begun copying to the - new cluster. - COPYING (2): - The table is actively being copied to the new - cluster. - COMPLETED (3): - The table has been fully copied to the new - cluster. - CANCELLED (4): - The table was deleted before it finished - copying to the new cluster. Note that tables - deleted after completion will stay marked as - COMPLETED, not CANCELLED. - """ - STATE_UNSPECIFIED = 0 - PENDING = 1 - COPYING = 2 - COMPLETED = 3 - CANCELLED = 4 - - estimated_size_bytes: int = proto.Field( - proto.INT64, - number=2, - ) - estimated_copied_bytes: int = proto.Field( - proto.INT64, - number=3, - ) - state: "CreateClusterMetadata.TableProgress.State" = proto.Field( - proto.ENUM, - number=4, - enum="CreateClusterMetadata.TableProgress.State", - ) - - original_request: "CreateClusterRequest" = proto.Field( - proto.MESSAGE, - number=1, - message="CreateClusterRequest", - ) - request_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - finish_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - tables: MutableMapping[str, TableProgress] = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=4, - message=TableProgress, - ) - - -class UpdateClusterMetadata(proto.Message): - r"""The metadata for the Operation returned by UpdateCluster. - - Attributes: - original_request (google.cloud.bigtable_admin_v2.types.Cluster): - The request that prompted the initiation of - this UpdateCluster operation. - request_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which the original request was - received. - finish_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which the operation failed or was - completed successfully. - """ - - original_request: gba_instance.Cluster = proto.Field( - proto.MESSAGE, - number=1, - message=gba_instance.Cluster, - ) - request_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - finish_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - - -class PartialUpdateClusterMetadata(proto.Message): - r"""The metadata for the Operation returned by - PartialUpdateCluster. - - Attributes: - request_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which the original request was - received. - finish_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which the operation failed or was - completed successfully. 
- original_request (google.cloud.bigtable_admin_v2.types.PartialUpdateClusterRequest): - The original request for - PartialUpdateCluster. - """ - - request_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=1, - message=timestamp_pb2.Timestamp, - ) - finish_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - original_request: "PartialUpdateClusterRequest" = proto.Field( - proto.MESSAGE, - number=3, - message="PartialUpdateClusterRequest", - ) - - -class PartialUpdateClusterRequest(proto.Message): - r"""Request message for - BigtableInstanceAdmin.PartialUpdateCluster. - - Attributes: - cluster (google.cloud.bigtable_admin_v2.types.Cluster): - Required. The Cluster which contains the partial updates to - be applied, subject to the update_mask. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The subset of Cluster fields which - should be replaced. - """ - - cluster: gba_instance.Cluster = proto.Field( - proto.MESSAGE, - number=1, - message=gba_instance.Cluster, - ) - update_mask: field_mask_pb2.FieldMask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class CreateAppProfileRequest(proto.Message): - r"""Request message for BigtableInstanceAdmin.CreateAppProfile. - - Attributes: - parent (str): - Required. The unique name of the instance in which to create - the new app profile. Values are of the form - ``projects/{project}/instances/{instance}``. - app_profile_id (str): - Required. The ID to be used when referring to the new app - profile within its instance, e.g., just ``myprofile`` rather - than - ``projects/myproject/instances/myinstance/appProfiles/myprofile``. - app_profile (google.cloud.bigtable_admin_v2.types.AppProfile): - Required. The app profile to be created. Fields marked - ``OutputOnly`` will be ignored. - ignore_warnings (bool): - If true, ignore safety checks when creating - the app profile. - """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - app_profile_id: str = proto.Field( - proto.STRING, - number=2, - ) - app_profile: gba_instance.AppProfile = proto.Field( - proto.MESSAGE, - number=3, - message=gba_instance.AppProfile, - ) - ignore_warnings: bool = proto.Field( - proto.BOOL, - number=4, - ) - - -class GetAppProfileRequest(proto.Message): - r"""Request message for BigtableInstanceAdmin.GetAppProfile. - - Attributes: - name (str): - Required. The unique name of the requested app profile. - Values are of the form - ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - - -class ListAppProfilesRequest(proto.Message): - r"""Request message for BigtableInstanceAdmin.ListAppProfiles. - - Attributes: - parent (str): - Required. The unique name of the instance for which a list - of app profiles is requested. Values are of the form - ``projects/{project}/instances/{instance}``. Use - ``{instance} = '-'`` to list AppProfiles for all Instances - in a project, e.g., ``projects/myproject/instances/-``. - page_size (int): - Maximum number of results per page. - - A page_size of zero lets the server choose the number of - items to return. A page_size which is strictly positive will - return at most that many items. A negative page_size will - cause an error. - - Following the first request, subsequent paginated calls are - not required to pass a page_size. 
If a page_size is set in - subsequent calls, it must match the page_size given in the - first request. - page_token (str): - The value of ``next_page_token`` returned by a previous - call. - """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - page_size: int = proto.Field( - proto.INT32, - number=3, - ) - page_token: str = proto.Field( - proto.STRING, - number=2, - ) - - -class ListAppProfilesResponse(proto.Message): - r"""Response message for BigtableInstanceAdmin.ListAppProfiles. - - Attributes: - app_profiles (MutableSequence[google.cloud.bigtable_admin_v2.types.AppProfile]): - The list of requested app profiles. - next_page_token (str): - Set if not all app profiles could be returned in a single - response. Pass this value to ``page_token`` in another - request to get the next page of results. - failed_locations (MutableSequence[str]): - Locations from which AppProfile information could not be - retrieved, due to an outage or some other transient - condition. AppProfiles from these locations may be missing - from ``app_profiles``. Values are of the form - ``projects//locations/`` - """ - - @property - def raw_page(self): - return self - - app_profiles: MutableSequence[gba_instance.AppProfile] = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gba_instance.AppProfile, - ) - next_page_token: str = proto.Field( - proto.STRING, - number=2, - ) - failed_locations: MutableSequence[str] = proto.RepeatedField( - proto.STRING, - number=3, - ) - - -class UpdateAppProfileRequest(proto.Message): - r"""Request message for BigtableInstanceAdmin.UpdateAppProfile. - - Attributes: - app_profile (google.cloud.bigtable_admin_v2.types.AppProfile): - Required. The app profile which will - (partially) replace the current value. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The subset of app profile fields - which should be replaced. If unset, all fields - will be replaced. - ignore_warnings (bool): - If true, ignore safety checks when updating - the app profile. - """ - - app_profile: gba_instance.AppProfile = proto.Field( - proto.MESSAGE, - number=1, - message=gba_instance.AppProfile, - ) - update_mask: field_mask_pb2.FieldMask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - ignore_warnings: bool = proto.Field( - proto.BOOL, - number=3, - ) - - -class DeleteAppProfileRequest(proto.Message): - r"""Request message for BigtableInstanceAdmin.DeleteAppProfile. - - Attributes: - name (str): - Required. The unique name of the app profile to be deleted. - Values are of the form - ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``. - ignore_warnings (bool): - Required. If true, ignore safety checks when - deleting the app profile. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - ignore_warnings: bool = proto.Field( - proto.BOOL, - number=2, - ) - - -class UpdateAppProfileMetadata(proto.Message): - r"""The metadata for the Operation returned by UpdateAppProfile.""" - - -class ListHotTabletsRequest(proto.Message): - r"""Request message for BigtableInstanceAdmin.ListHotTablets. - - Attributes: - parent (str): - Required. The cluster name to list hot tablets. Value is in - the following form: - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - start_time (google.protobuf.timestamp_pb2.Timestamp): - The start time to list hot tablets. The hot - tablets in the response will have start times - between the requested start time and end time. 
- Start time defaults to Now if it is unset, and - end time defaults to Now - 24 hours if it is - unset. The start time should be less than the - end time, and the maximum allowed time range - between start time and end time is 48 hours. - Start time and end time should have values - between Now and Now - 14 days. - end_time (google.protobuf.timestamp_pb2.Timestamp): - The end time to list hot tablets. - page_size (int): - Maximum number of results per page. - - A page_size that is empty or zero lets the server choose the - number of items to return. A page_size which is strictly - positive will return at most that many items. A negative - page_size will cause an error. - - Following the first request, subsequent paginated calls do - not need a page_size field. If a page_size is set in - subsequent calls, it must match the page_size given in the - first request. - page_token (str): - The value of ``next_page_token`` returned by a previous - call. - """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - start_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - end_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - page_size: int = proto.Field( - proto.INT32, - number=4, - ) - page_token: str = proto.Field( - proto.STRING, - number=5, - ) - - -class ListHotTabletsResponse(proto.Message): - r"""Response message for BigtableInstanceAdmin.ListHotTablets. - - Attributes: - hot_tablets (MutableSequence[google.cloud.bigtable_admin_v2.types.HotTablet]): - List of hot tablets in the tables of the - requested cluster that fall within the requested - time range. Hot tablets are ordered by node cpu - usage percent. If there are multiple hot tablets - that correspond to the same tablet within a - 15-minute interval, only the hot tablet with the - highest node cpu usage will be included in the - response. - next_page_token (str): - Set if not all hot tablets could be returned in a single - response. Pass this value to ``page_token`` in another - request to get the next page of results. - """ - - @property - def raw_page(self): - return self - - hot_tablets: MutableSequence[gba_instance.HotTablet] = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gba_instance.HotTablet, - ) - next_page_token: str = proto.Field( - proto.STRING, - number=2, - ) - - -class CreateLogicalViewRequest(proto.Message): - r"""Request message for BigtableInstanceAdmin.CreateLogicalView. - - Attributes: - parent (str): - Required. The parent instance where this logical view will - be created. Format: - ``projects/{project}/instances/{instance}``. - logical_view_id (str): - Required. The ID to use for the logical view, - which will become the final component of the - logical view's resource name. - logical_view (google.cloud.bigtable_admin_v2.types.LogicalView): - Required. The logical view to create. - """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - logical_view_id: str = proto.Field( - proto.STRING, - number=2, - ) - logical_view: gba_instance.LogicalView = proto.Field( - proto.MESSAGE, - number=3, - message=gba_instance.LogicalView, - ) - - -class CreateLogicalViewMetadata(proto.Message): - r"""The metadata for the Operation returned by CreateLogicalView. - - Attributes: - original_request (google.cloud.bigtable_admin_v2.types.CreateLogicalViewRequest): - The request that prompted the initiation of - this CreateLogicalView operation. 
- start_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which this operation started. - end_time (google.protobuf.timestamp_pb2.Timestamp): - If set, the time at which this operation - finished or was canceled. - """ - - original_request: "CreateLogicalViewRequest" = proto.Field( - proto.MESSAGE, - number=1, - message="CreateLogicalViewRequest", - ) - start_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - end_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - - -class GetLogicalViewRequest(proto.Message): - r"""Request message for BigtableInstanceAdmin.GetLogicalView. - - Attributes: - name (str): - Required. The unique name of the requested logical view. - Values are of the form - ``projects/{project}/instances/{instance}/logicalViews/{logical_view}``. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - - -class ListLogicalViewsRequest(proto.Message): - r"""Request message for BigtableInstanceAdmin.ListLogicalViews. - - Attributes: - parent (str): - Required. The unique name of the instance for which the list - of logical views is requested. Values are of the form - ``projects/{project}/instances/{instance}``. - page_size (int): - Optional. The maximum number of logical views - to return. The service may return fewer than - this value - page_token (str): - Optional. A page token, received from a previous - ``ListLogicalViews`` call. Provide this to retrieve the - subsequent page. - - When paginating, all other parameters provided to - ``ListLogicalViews`` must match the call that provided the - page token. - """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - page_size: int = proto.Field( - proto.INT32, - number=2, - ) - page_token: str = proto.Field( - proto.STRING, - number=3, - ) - - -class ListLogicalViewsResponse(proto.Message): - r"""Response message for BigtableInstanceAdmin.ListLogicalViews. - - Attributes: - logical_views (MutableSequence[google.cloud.bigtable_admin_v2.types.LogicalView]): - The list of requested logical views. - next_page_token (str): - A token, which can be sent as ``page_token`` to retrieve the - next page. If this field is omitted, there are no subsequent - pages. - """ - - @property - def raw_page(self): - return self - - logical_views: MutableSequence[gba_instance.LogicalView] = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gba_instance.LogicalView, - ) - next_page_token: str = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateLogicalViewRequest(proto.Message): - r"""Request message for BigtableInstanceAdmin.UpdateLogicalView. - - Attributes: - logical_view (google.cloud.bigtable_admin_v2.types.LogicalView): - Required. The logical view to update. - - The logical view's ``name`` field is used to identify the - view to update. Format: - ``projects/{project}/instances/{instance}/logicalViews/{logical_view}``. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Optional. The list of fields to update. - """ - - logical_view: gba_instance.LogicalView = proto.Field( - proto.MESSAGE, - number=1, - message=gba_instance.LogicalView, - ) - update_mask: field_mask_pb2.FieldMask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class UpdateLogicalViewMetadata(proto.Message): - r"""The metadata for the Operation returned by UpdateLogicalView. 
- - Attributes: - original_request (google.cloud.bigtable_admin_v2.types.UpdateLogicalViewRequest): - The request that prompted the initiation of - this UpdateLogicalView operation. - start_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which this operation was started. - end_time (google.protobuf.timestamp_pb2.Timestamp): - If set, the time at which this operation - finished or was canceled. - """ - - original_request: "UpdateLogicalViewRequest" = proto.Field( - proto.MESSAGE, - number=1, - message="UpdateLogicalViewRequest", - ) - start_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - end_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - - -class DeleteLogicalViewRequest(proto.Message): - r"""Request message for BigtableInstanceAdmin.DeleteLogicalView. - - Attributes: - name (str): - Required. The unique name of the logical view to be deleted. - Format: - ``projects/{project}/instances/{instance}/logicalViews/{logical_view}``. - etag (str): - Optional. The current etag of the logical - view. If an etag is provided and does not match - the current etag of the logical view, deletion - will be blocked and an ABORTED error will be - returned. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - etag: str = proto.Field( - proto.STRING, - number=2, - ) - - -class CreateMaterializedViewRequest(proto.Message): - r"""Request message for - BigtableInstanceAdmin.CreateMaterializedView. - - Attributes: - parent (str): - Required. The parent instance where this materialized view - will be created. Format: - ``projects/{project}/instances/{instance}``. - materialized_view_id (str): - Required. The ID to use for the materialized - view, which will become the final component of - the materialized view's resource name. - materialized_view (google.cloud.bigtable_admin_v2.types.MaterializedView): - Required. The materialized view to create. - """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - materialized_view_id: str = proto.Field( - proto.STRING, - number=2, - ) - materialized_view: gba_instance.MaterializedView = proto.Field( - proto.MESSAGE, - number=3, - message=gba_instance.MaterializedView, - ) - - -class CreateMaterializedViewMetadata(proto.Message): - r"""The metadata for the Operation returned by - CreateMaterializedView. - - Attributes: - original_request (google.cloud.bigtable_admin_v2.types.CreateMaterializedViewRequest): - The request that prompted the initiation of - this CreateMaterializedView operation. - start_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which this operation started. - end_time (google.protobuf.timestamp_pb2.Timestamp): - If set, the time at which this operation - finished or was canceled. - """ - - original_request: "CreateMaterializedViewRequest" = proto.Field( - proto.MESSAGE, - number=1, - message="CreateMaterializedViewRequest", - ) - start_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - end_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - - -class GetMaterializedViewRequest(proto.Message): - r"""Request message for - BigtableInstanceAdmin.GetMaterializedView. - - Attributes: - name (str): - Required. The unique name of the requested materialized - view. 
Values are of the form - ``projects/{project}/instances/{instance}/materializedViews/{materialized_view}``. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - - -class ListMaterializedViewsRequest(proto.Message): - r"""Request message for - BigtableInstanceAdmin.ListMaterializedViews. - - Attributes: - parent (str): - Required. The unique name of the instance for which the list - of materialized views is requested. Values are of the form - ``projects/{project}/instances/{instance}``. - page_size (int): - Optional. The maximum number of materialized - views to return. The service may return fewer - than this value - page_token (str): - Optional. A page token, received from a previous - ``ListMaterializedViews`` call. Provide this to retrieve the - subsequent page. - - When paginating, all other parameters provided to - ``ListMaterializedViews`` must match the call that provided - the page token. - """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - page_size: int = proto.Field( - proto.INT32, - number=2, - ) - page_token: str = proto.Field( - proto.STRING, - number=3, - ) - - -class ListMaterializedViewsResponse(proto.Message): - r"""Response message for - BigtableInstanceAdmin.ListMaterializedViews. - - Attributes: - materialized_views (MutableSequence[google.cloud.bigtable_admin_v2.types.MaterializedView]): - The list of requested materialized views. - next_page_token (str): - A token, which can be sent as ``page_token`` to retrieve the - next page. If this field is omitted, there are no subsequent - pages. - """ - - @property - def raw_page(self): - return self - - materialized_views: MutableSequence[ - gba_instance.MaterializedView - ] = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gba_instance.MaterializedView, - ) - next_page_token: str = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateMaterializedViewRequest(proto.Message): - r"""Request message for - BigtableInstanceAdmin.UpdateMaterializedView. - - Attributes: - materialized_view (google.cloud.bigtable_admin_v2.types.MaterializedView): - Required. The materialized view to update. - - The materialized view's ``name`` field is used to identify - the view to update. Format: - ``projects/{project}/instances/{instance}/materializedViews/{materialized_view}``. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Optional. The list of fields to update. - """ - - materialized_view: gba_instance.MaterializedView = proto.Field( - proto.MESSAGE, - number=1, - message=gba_instance.MaterializedView, - ) - update_mask: field_mask_pb2.FieldMask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class UpdateMaterializedViewMetadata(proto.Message): - r"""The metadata for the Operation returned by - UpdateMaterializedView. - - Attributes: - original_request (google.cloud.bigtable_admin_v2.types.UpdateMaterializedViewRequest): - The request that prompted the initiation of - this UpdateMaterializedView operation. - start_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which this operation was started. - end_time (google.protobuf.timestamp_pb2.Timestamp): - If set, the time at which this operation - finished or was canceled. 
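For context, a minimal sketch of how the list request types above are normally consumed, assuming the generated BigtableInstanceAdminClient (removed elsewhere in this patch) exposes a matching list_materialized_views method; the project and instance names are placeholders.

from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableInstanceAdminClient()

request = bigtable_admin_v2.types.ListMaterializedViewsRequest(
    parent="projects/my-project/instances/my-instance",  # placeholder resource name
    page_size=100,
)
# The returned pager re-issues the request with page_token set from each
# next_page_token, so callers normally just iterate it.
for view in client.list_materialized_views(request=request):
    print(view.name)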
- """ - - original_request: "UpdateMaterializedViewRequest" = proto.Field( - proto.MESSAGE, - number=1, - message="UpdateMaterializedViewRequest", - ) - start_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - end_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - - -class DeleteMaterializedViewRequest(proto.Message): - r"""Request message for - BigtableInstanceAdmin.DeleteMaterializedView. - - Attributes: - name (str): - Required. The unique name of the materialized view to be - deleted. Format: - ``projects/{project}/instances/{instance}/materializedViews/{materialized_view}``. - etag (str): - Optional. The current etag of the - materialized view. If an etag is provided and - does not match the current etag of the - materialized view, deletion will be blocked and - an ABORTED error will be returned. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - etag: str = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py b/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py deleted file mode 100644 index d6403fc2a..000000000 --- a/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py +++ /dev/null @@ -1,1966 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - -from google.cloud.bigtable_admin_v2.types import common -from google.cloud.bigtable_admin_v2.types import table as gba_table -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package="google.bigtable.admin.v2", - manifest={ - "RestoreTableRequest", - "RestoreTableMetadata", - "OptimizeRestoredTableMetadata", - "CreateTableRequest", - "CreateTableFromSnapshotRequest", - "DropRowRangeRequest", - "ListTablesRequest", - "ListTablesResponse", - "GetTableRequest", - "UpdateTableRequest", - "UpdateTableMetadata", - "DeleteTableRequest", - "UndeleteTableRequest", - "UndeleteTableMetadata", - "ModifyColumnFamiliesRequest", - "GenerateConsistencyTokenRequest", - "GenerateConsistencyTokenResponse", - "CheckConsistencyRequest", - "StandardReadRemoteWrites", - "DataBoostReadLocalWrites", - "CheckConsistencyResponse", - "SnapshotTableRequest", - "GetSnapshotRequest", - "ListSnapshotsRequest", - "ListSnapshotsResponse", - "DeleteSnapshotRequest", - "SnapshotTableMetadata", - "CreateTableFromSnapshotMetadata", - "CreateBackupRequest", - "CreateBackupMetadata", - "UpdateBackupRequest", - "GetBackupRequest", - "DeleteBackupRequest", - "ListBackupsRequest", - "ListBackupsResponse", - "CopyBackupRequest", - "CopyBackupMetadata", - "CreateAuthorizedViewRequest", - "CreateAuthorizedViewMetadata", - "ListAuthorizedViewsRequest", - "ListAuthorizedViewsResponse", - "GetAuthorizedViewRequest", - "UpdateAuthorizedViewRequest", - "UpdateAuthorizedViewMetadata", - "DeleteAuthorizedViewRequest", - "CreateSchemaBundleRequest", - "CreateSchemaBundleMetadata", - "UpdateSchemaBundleRequest", - "UpdateSchemaBundleMetadata", - "GetSchemaBundleRequest", - "ListSchemaBundlesRequest", - "ListSchemaBundlesResponse", - "DeleteSchemaBundleRequest", - }, -) - - -class RestoreTableRequest(proto.Message): - r"""The request for - [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. - - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - parent (str): - Required. The name of the instance in which to create the - restored table. Values are of the form - ``projects//instances/``. - table_id (str): - Required. The id of the table to create and restore to. This - table must not already exist. The ``table_id`` appended to - ``parent`` forms the full table name of the form - ``projects//instances//tables/``. - backup (str): - Name of the backup from which to restore. Values are of the - form - ``projects//instances//clusters//backups/``. - - This field is a member of `oneof`_ ``source``. - """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - table_id: str = proto.Field( - proto.STRING, - number=2, - ) - backup: str = proto.Field( - proto.STRING, - number=3, - oneof="source", - ) - - -class RestoreTableMetadata(proto.Message): - r"""Metadata type for the long-running operation returned by - [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. - - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - name (str): - Name of the table being created and restored - to. 
- source_type (google.cloud.bigtable_admin_v2.types.RestoreSourceType): - The type of the restore source. - backup_info (google.cloud.bigtable_admin_v2.types.BackupInfo): - - This field is a member of `oneof`_ ``source_info``. - optimize_table_operation_name (str): - If exists, the name of the long-running operation that will - be used to track the post-restore optimization process to - optimize the performance of the restored table. The metadata - type of the long-running operation is - [OptimizeRestoreTableMetadata][]. The response type is - [Empty][google.protobuf.Empty]. This long-running operation - may be automatically created by the system if applicable - after the RestoreTable long-running operation completes - successfully. This operation may not be created if the table - is already optimized or the restore was not successful. - progress (google.cloud.bigtable_admin_v2.types.OperationProgress): - The progress of the - [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable] - operation. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - source_type: gba_table.RestoreSourceType = proto.Field( - proto.ENUM, - number=2, - enum=gba_table.RestoreSourceType, - ) - backup_info: gba_table.BackupInfo = proto.Field( - proto.MESSAGE, - number=3, - oneof="source_info", - message=gba_table.BackupInfo, - ) - optimize_table_operation_name: str = proto.Field( - proto.STRING, - number=4, - ) - progress: common.OperationProgress = proto.Field( - proto.MESSAGE, - number=5, - message=common.OperationProgress, - ) - - -class OptimizeRestoredTableMetadata(proto.Message): - r"""Metadata type for the long-running operation used to track - the progress of optimizations performed on a newly restored - table. This long-running operation is automatically created by - the system after the successful completion of a table restore, - and cannot be cancelled. - - Attributes: - name (str): - Name of the restored table being optimized. - progress (google.cloud.bigtable_admin_v2.types.OperationProgress): - The progress of the post-restore - optimizations. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - progress: common.OperationProgress = proto.Field( - proto.MESSAGE, - number=2, - message=common.OperationProgress, - ) - - -class CreateTableRequest(proto.Message): - r"""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] - - Attributes: - parent (str): - Required. The unique name of the instance in which to create - the table. Values are of the form - ``projects/{project}/instances/{instance}``. - table_id (str): - Required. The name by which the new table should be referred - to within the parent instance, e.g., ``foobar`` rather than - ``{parent}/tables/foobar``. Maximum 50 characters. - table (google.cloud.bigtable_admin_v2.types.Table): - Required. The Table to create. - initial_splits (MutableSequence[google.cloud.bigtable_admin_v2.types.CreateTableRequest.Split]): - The optional list of row keys that will be used to initially - split the table into several tablets (tablets are similar to - HBase regions). Given two split keys, ``s1`` and ``s2``, - three tablets will be created, spanning the key ranges: - ``[, s1), [s1, s2), [s2, )``. 
- - Example: - - - Row keys := - ``["a", "apple", "custom", "customer_1", "customer_2",`` - ``"other", "zz"]`` - - initial_split_keys := - ``["apple", "customer_1", "customer_2", "other"]`` - - Key assignment: - - - Tablet 1 ``[, apple) => {"a"}.`` - - Tablet 2 - ``[apple, customer_1) => {"apple", "custom"}.`` - - Tablet 3 - ``[customer_1, customer_2) => {"customer_1"}.`` - - Tablet 4 ``[customer_2, other) => {"customer_2"}.`` - - Tablet 5 ``[other, ) => {"other", "zz"}.`` - """ - - class Split(proto.Message): - r"""An initial split point for a newly created table. - - Attributes: - key (bytes): - Row key to use as an initial tablet boundary. - """ - - key: bytes = proto.Field( - proto.BYTES, - number=1, - ) - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - table_id: str = proto.Field( - proto.STRING, - number=2, - ) - table: gba_table.Table = proto.Field( - proto.MESSAGE, - number=3, - message=gba_table.Table, - ) - initial_splits: MutableSequence[Split] = proto.RepeatedField( - proto.MESSAGE, - number=4, - message=Split, - ) - - -class CreateTableFromSnapshotRequest(proto.Message): - r"""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot] - - Note: This is a private alpha release of Cloud Bigtable snapshots. - This feature is not currently available to most Cloud Bigtable - customers. This feature might be changed in backward-incompatible - ways and is not recommended for production use. It is not subject to - any SLA or deprecation policy. - - Attributes: - parent (str): - Required. The unique name of the instance in which to create - the table. Values are of the form - ``projects/{project}/instances/{instance}``. - table_id (str): - Required. The name by which the new table should be referred - to within the parent instance, e.g., ``foobar`` rather than - ``{parent}/tables/foobar``. - source_snapshot (str): - Required. The unique name of the snapshot from which to - restore the table. The snapshot and the table must be in the - same instance. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. - """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - table_id: str = proto.Field( - proto.STRING, - number=2, - ) - source_snapshot: str = proto.Field( - proto.STRING, - number=3, - ) - - -class DropRowRangeRequest(proto.Message): - r"""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - name (str): - Required. The unique name of the table on which to drop a - range of rows. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - row_key_prefix (bytes): - Delete all rows that start with this row key - prefix. Prefix cannot be zero length. - - This field is a member of `oneof`_ ``target``. - delete_all_data_from_table (bool): - Delete all rows in the table. Setting this to - false is a no-op. - - This field is a member of `oneof`_ ``target``. 
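The mutually exclusive ``target`` fields above behave like any proto-plus oneof: assigning one member clears the other before the request is sent. A small construction-only sketch, with placeholder names.

from google.cloud import bigtable_admin_v2

request = bigtable_admin_v2.types.DropRowRangeRequest(
    name="projects/my-project/instances/my-instance/tables/my-table",  # placeholder
    row_key_prefix=b"user#",
)
# Assigning the other oneof member clears row_key_prefix automatically.
request.delete_all_data_from_table = True
assert not request.row_key_prefix
# client.drop_row_range(request=request) would then drop every row in the table.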
- """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - row_key_prefix: bytes = proto.Field( - proto.BYTES, - number=2, - oneof="target", - ) - delete_all_data_from_table: bool = proto.Field( - proto.BOOL, - number=3, - oneof="target", - ) - - -class ListTablesRequest(proto.Message): - r"""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] - - Attributes: - parent (str): - Required. The unique name of the instance for which tables - should be listed. Values are of the form - ``projects/{project}/instances/{instance}``. - view (google.cloud.bigtable_admin_v2.types.Table.View): - The view to be applied to the returned tables' fields. - NAME_ONLY view (default) and REPLICATION_VIEW are supported. - page_size (int): - Maximum number of results per page. - - A page_size of zero lets the server choose the number of - items to return. A page_size which is strictly positive will - return at most that many items. A negative page_size will - cause an error. - - Following the first request, subsequent paginated calls are - not required to pass a page_size. If a page_size is set in - subsequent calls, it must match the page_size given in the - first request. - page_token (str): - The value of ``next_page_token`` returned by a previous - call. - """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - view: gba_table.Table.View = proto.Field( - proto.ENUM, - number=2, - enum=gba_table.Table.View, - ) - page_size: int = proto.Field( - proto.INT32, - number=4, - ) - page_token: str = proto.Field( - proto.STRING, - number=3, - ) - - -class ListTablesResponse(proto.Message): - r"""Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] - - Attributes: - tables (MutableSequence[google.cloud.bigtable_admin_v2.types.Table]): - The tables present in the requested instance. - next_page_token (str): - Set if not all tables could be returned in a single - response. Pass this value to ``page_token`` in another - request to get the next page of results. - """ - - @property - def raw_page(self): - return self - - tables: MutableSequence[gba_table.Table] = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gba_table.Table, - ) - next_page_token: str = proto.Field( - proto.STRING, - number=2, - ) - - -class GetTableRequest(proto.Message): - r"""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] - - Attributes: - name (str): - Required. The unique name of the requested table. Values are - of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - view (google.cloud.bigtable_admin_v2.types.Table.View): - The view to be applied to the returned table's fields. - Defaults to ``SCHEMA_VIEW`` if unspecified. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - view: gba_table.Table.View = proto.Field( - proto.ENUM, - number=2, - enum=gba_table.Table.View, - ) - - -class UpdateTableRequest(proto.Message): - r"""The request for - [UpdateTable][google.bigtable.admin.v2.BigtableTableAdmin.UpdateTable]. - - Attributes: - table (google.cloud.bigtable_admin_v2.types.Table): - Required. The table to update. The table's ``name`` field is - used to identify the table to update. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The list of fields to update. A mask specifying - which fields (e.g. 
``change_stream_config``) in the - ``table`` field should be updated. This mask is relative to - the ``table`` field, not to the request message. The - wildcard (*) path is currently not supported. Currently - UpdateTable is only supported for the following fields: - - - ``change_stream_config`` - - ``change_stream_config.retention_period`` - - ``deletion_protection`` - - ``row_key_schema`` - - If ``column_families`` is set in ``update_mask``, it will - return an UNIMPLEMENTED error. - ignore_warnings (bool): - Optional. If true, ignore safety checks when - updating the table. - """ - - table: gba_table.Table = proto.Field( - proto.MESSAGE, - number=1, - message=gba_table.Table, - ) - update_mask: field_mask_pb2.FieldMask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - ignore_warnings: bool = proto.Field( - proto.BOOL, - number=3, - ) - - -class UpdateTableMetadata(proto.Message): - r"""Metadata type for the operation returned by - [UpdateTable][google.bigtable.admin.v2.BigtableTableAdmin.UpdateTable]. - - Attributes: - name (str): - The name of the table being updated. - start_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which this operation started. - end_time (google.protobuf.timestamp_pb2.Timestamp): - If set, the time at which this operation - finished or was canceled. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - start_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - end_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - - -class DeleteTableRequest(proto.Message): - r"""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] - - Attributes: - name (str): - Required. The unique name of the table to be deleted. Values - are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - - -class UndeleteTableRequest(proto.Message): - r"""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable] - - Attributes: - name (str): - Required. The unique name of the table to be restored. - Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - - -class UndeleteTableMetadata(proto.Message): - r"""Metadata type for the operation returned by - [google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable]. - - Attributes: - name (str): - The name of the table being restored. - start_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which this operation started. - end_time (google.protobuf.timestamp_pb2.Timestamp): - If set, the time at which this operation - finished or was cancelled. 
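A minimal sketch of the UpdateTable flow described above, assuming the generated client surface removed by this patch; the mask paths are relative to ``table`` and limited to the fields listed above, and all resource names are placeholders.

from google.cloud import bigtable_admin_v2
from google.protobuf import field_mask_pb2

client = bigtable_admin_v2.BigtableTableAdminClient()

request = bigtable_admin_v2.types.UpdateTableRequest(
    table=bigtable_admin_v2.types.Table(
        name="projects/my-project/instances/my-instance/tables/my-table",  # placeholder
        deletion_protection=True,
    ),
    update_mask=field_mask_pb2.FieldMask(paths=["deletion_protection"]),
)
operation = client.update_table(request=request)  # long-running operation
table = operation.result()                        # blocks until the update finishes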
- """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - start_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - end_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - - -class ModifyColumnFamiliesRequest(proto.Message): - r"""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] - - Attributes: - name (str): - Required. The unique name of the table whose families should - be modified. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - modifications (MutableSequence[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest.Modification]): - Required. Modifications to be atomically - applied to the specified table's families. - Entries are applied in order, meaning that - earlier modifications can be masked by later - ones (in the case of repeated updates to the - same family, for example). - ignore_warnings (bool): - Optional. If true, ignore safety checks when - modifying the column families. - """ - - class Modification(proto.Message): - r"""A create, update, or delete of a particular column family. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - id (str): - The ID of the column family to be modified. - create (google.cloud.bigtable_admin_v2.types.ColumnFamily): - Create a new column family with the specified - schema, or fail if one already exists with the - given ID. - - This field is a member of `oneof`_ ``mod``. - update (google.cloud.bigtable_admin_v2.types.ColumnFamily): - Update an existing column family to the - specified schema, or fail if no column family - exists with the given ID. - - This field is a member of `oneof`_ ``mod``. - drop (bool): - Drop (delete) the column family with the - given ID, or fail if no such family exists. - - This field is a member of `oneof`_ ``mod``. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Optional. A mask specifying which fields (e.g. ``gc_rule``) - in the ``update`` mod should be updated, ignored for other - modification types. If unset or empty, we treat it as - updating ``gc_rule`` to be backward compatible. 
- """ - - id: str = proto.Field( - proto.STRING, - number=1, - ) - create: gba_table.ColumnFamily = proto.Field( - proto.MESSAGE, - number=2, - oneof="mod", - message=gba_table.ColumnFamily, - ) - update: gba_table.ColumnFamily = proto.Field( - proto.MESSAGE, - number=3, - oneof="mod", - message=gba_table.ColumnFamily, - ) - drop: bool = proto.Field( - proto.BOOL, - number=4, - oneof="mod", - ) - update_mask: field_mask_pb2.FieldMask = proto.Field( - proto.MESSAGE, - number=6, - message=field_mask_pb2.FieldMask, - ) - - name: str = proto.Field( - proto.STRING, - number=1, - ) - modifications: MutableSequence[Modification] = proto.RepeatedField( - proto.MESSAGE, - number=2, - message=Modification, - ) - ignore_warnings: bool = proto.Field( - proto.BOOL, - number=3, - ) - - -class GenerateConsistencyTokenRequest(proto.Message): - r"""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] - - Attributes: - name (str): - Required. The unique name of the Table for which to create a - consistency token. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - - -class GenerateConsistencyTokenResponse(proto.Message): - r"""Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] - - Attributes: - consistency_token (str): - The generated consistency token. - """ - - consistency_token: str = proto.Field( - proto.STRING, - number=1, - ) - - -class CheckConsistencyRequest(proto.Message): - r"""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - name (str): - Required. The unique name of the Table for which to check - replication consistency. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - consistency_token (str): - Required. The token created using - GenerateConsistencyToken for the Table. - standard_read_remote_writes (google.cloud.bigtable_admin_v2.types.StandardReadRemoteWrites): - Checks that reads using an app profile with - ``StandardIsolation`` can see all writes committed before - the token was created, even if the read and write target - different clusters. - - This field is a member of `oneof`_ ``mode``. - data_boost_read_local_writes (google.cloud.bigtable_admin_v2.types.DataBoostReadLocalWrites): - Checks that reads using an app profile with - ``DataBoostIsolationReadOnly`` can see all writes committed - before the token was created, but only if the read and write - target the same cluster. - - This field is a member of `oneof`_ ``mode``. 
- """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - consistency_token: str = proto.Field( - proto.STRING, - number=2, - ) - standard_read_remote_writes: "StandardReadRemoteWrites" = proto.Field( - proto.MESSAGE, - number=3, - oneof="mode", - message="StandardReadRemoteWrites", - ) - data_boost_read_local_writes: "DataBoostReadLocalWrites" = proto.Field( - proto.MESSAGE, - number=4, - oneof="mode", - message="DataBoostReadLocalWrites", - ) - - -class StandardReadRemoteWrites(proto.Message): - r"""Checks that all writes before the consistency token was - generated are replicated in every cluster and readable. - - """ - - -class DataBoostReadLocalWrites(proto.Message): - r"""Checks that all writes before the consistency token was - generated in the same cluster are readable by Databoost. - - """ - - -class CheckConsistencyResponse(proto.Message): - r"""Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] - - Attributes: - consistent (bool): - True only if the token is consistent. A token - is consistent if replication has caught up with - the restrictions specified in the request. - """ - - consistent: bool = proto.Field( - proto.BOOL, - number=1, - ) - - -class SnapshotTableRequest(proto.Message): - r"""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable] - - Note: This is a private alpha release of Cloud Bigtable snapshots. - This feature is not currently available to most Cloud Bigtable - customers. This feature might be changed in backward-incompatible - ways and is not recommended for production use. It is not subject to - any SLA or deprecation policy. - - Attributes: - name (str): - Required. The unique name of the table to have the snapshot - taken. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - cluster (str): - Required. The name of the cluster where the snapshot will be - created in. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - snapshot_id (str): - Required. The ID by which the new snapshot should be - referred to within the parent cluster, e.g., ``mysnapshot`` - of the form: ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` rather than - ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/mysnapshot``. - ttl (google.protobuf.duration_pb2.Duration): - The amount of time that the new snapshot can - stay active after it is created. Once 'ttl' - expires, the snapshot will get deleted. The - maximum amount of time a snapshot can stay - active is 7 days. If 'ttl' is not specified, the - default value of 24 hours will be used. - description (str): - Description of the snapshot. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - cluster: str = proto.Field( - proto.STRING, - number=2, - ) - snapshot_id: str = proto.Field( - proto.STRING, - number=3, - ) - ttl: duration_pb2.Duration = proto.Field( - proto.MESSAGE, - number=4, - message=duration_pb2.Duration, - ) - description: str = proto.Field( - proto.STRING, - number=5, - ) - - -class GetSnapshotRequest(proto.Message): - r"""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] - - Note: This is a private alpha release of Cloud Bigtable snapshots. - This feature is not currently available to most Cloud Bigtable - customers. 
This feature might be changed in backward-incompatible - ways and is not recommended for production use. It is not subject to - any SLA or deprecation policy. - - Attributes: - name (str): - Required. The unique name of the requested snapshot. Values - are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - - -class ListSnapshotsRequest(proto.Message): - r"""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] - - Note: This is a private alpha release of Cloud Bigtable snapshots. - This feature is not currently available to most Cloud Bigtable - customers. This feature might be changed in backward-incompatible - ways and is not recommended for production use. It is not subject to - any SLA or deprecation policy. - - Attributes: - parent (str): - Required. The unique name of the cluster for which snapshots - should be listed. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - Use ``{cluster} = '-'`` to list snapshots for all clusters - in an instance, e.g., - ``projects/{project}/instances/{instance}/clusters/-``. - page_size (int): - The maximum number of snapshots to return per - page. CURRENTLY UNIMPLEMENTED AND IGNORED. - page_token (str): - The value of ``next_page_token`` returned by a previous - call. - """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - page_size: int = proto.Field( - proto.INT32, - number=2, - ) - page_token: str = proto.Field( - proto.STRING, - number=3, - ) - - -class ListSnapshotsResponse(proto.Message): - r"""Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] - - Note: This is a private alpha release of Cloud Bigtable snapshots. - This feature is not currently available to most Cloud Bigtable - customers. This feature might be changed in backward-incompatible - ways and is not recommended for production use. It is not subject to - any SLA or deprecation policy. - - Attributes: - snapshots (MutableSequence[google.cloud.bigtable_admin_v2.types.Snapshot]): - The snapshots present in the requested - cluster. - next_page_token (str): - Set if not all snapshots could be returned in a single - response. Pass this value to ``page_token`` in another - request to get the next page of results. - """ - - @property - def raw_page(self): - return self - - snapshots: MutableSequence[gba_table.Snapshot] = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gba_table.Snapshot, - ) - next_page_token: str = proto.Field( - proto.STRING, - number=2, - ) - - -class DeleteSnapshotRequest(proto.Message): - r"""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] - - Note: This is a private alpha release of Cloud Bigtable snapshots. - This feature is not currently available to most Cloud Bigtable - customers. This feature might be changed in backward-incompatible - ways and is not recommended for production use. It is not subject to - any SLA or deprecation policy. - - Attributes: - name (str): - Required. The unique name of the snapshot to be deleted. - Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. 
- """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - - -class SnapshotTableMetadata(proto.Message): - r"""The metadata for the Operation returned by SnapshotTable. - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to most Cloud - Bigtable customers. This feature might be changed in - backward-incompatible ways and is not recommended for production - use. It is not subject to any SLA or deprecation policy. - - Attributes: - original_request (google.cloud.bigtable_admin_v2.types.SnapshotTableRequest): - The request that prompted the initiation of - this SnapshotTable operation. - request_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which the original request was - received. - finish_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which the operation failed or was - completed successfully. - """ - - original_request: "SnapshotTableRequest" = proto.Field( - proto.MESSAGE, - number=1, - message="SnapshotTableRequest", - ) - request_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - finish_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - - -class CreateTableFromSnapshotMetadata(proto.Message): - r"""The metadata for the Operation returned by - CreateTableFromSnapshot. - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to most Cloud - Bigtable customers. This feature might be changed in - backward-incompatible ways and is not recommended for production - use. It is not subject to any SLA or deprecation policy. - - Attributes: - original_request (google.cloud.bigtable_admin_v2.types.CreateTableFromSnapshotRequest): - The request that prompted the initiation of - this CreateTableFromSnapshot operation. - request_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which the original request was - received. - finish_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which the operation failed or was - completed successfully. - """ - - original_request: "CreateTableFromSnapshotRequest" = proto.Field( - proto.MESSAGE, - number=1, - message="CreateTableFromSnapshotRequest", - ) - request_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - finish_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - - -class CreateBackupRequest(proto.Message): - r"""The request for - [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]. - - Attributes: - parent (str): - Required. This must be one of the clusters in the instance - in which this table is located. The backup will be stored in - this cluster. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - backup_id (str): - Required. The id of the backup to be created. The - ``backup_id`` along with the parent ``parent`` are combined - as {parent}/backups/{backup_id} to create the full backup - name, of the form: - ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``. - This string must be between 1 and 50 characters in length - and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*. - backup (google.cloud.bigtable_admin_v2.types.Backup): - Required. The backup to create. 
- """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - backup_id: str = proto.Field( - proto.STRING, - number=2, - ) - backup: gba_table.Backup = proto.Field( - proto.MESSAGE, - number=3, - message=gba_table.Backup, - ) - - -class CreateBackupMetadata(proto.Message): - r"""Metadata type for the operation returned by - [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]. - - Attributes: - name (str): - The name of the backup being created. - source_table (str): - The name of the table the backup is created - from. - start_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which this operation started. - end_time (google.protobuf.timestamp_pb2.Timestamp): - If set, the time at which this operation - finished or was cancelled. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - source_table: str = proto.Field( - proto.STRING, - number=2, - ) - start_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - end_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - - -class UpdateBackupRequest(proto.Message): - r"""The request for - [UpdateBackup][google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup]. - - Attributes: - backup (google.cloud.bigtable_admin_v2.types.Backup): - Required. The backup to update. ``backup.name``, and the - fields to be updated as specified by ``update_mask`` are - required. Other fields are ignored. Update is only supported - for the following fields: - - - ``backup.expire_time``. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. A mask specifying which fields (e.g. - ``expire_time``) in the Backup resource should be updated. - This mask is relative to the Backup resource, not to the - request message. The field mask must always be specified; - this prevents any future fields from being erased - accidentally by clients that do not know about them. - """ - - backup: gba_table.Backup = proto.Field( - proto.MESSAGE, - number=1, - message=gba_table.Backup, - ) - update_mask: field_mask_pb2.FieldMask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class GetBackupRequest(proto.Message): - r"""The request for - [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. - - Attributes: - name (str): - Required. Name of the backup. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - - -class DeleteBackupRequest(proto.Message): - r"""The request for - [DeleteBackup][google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup]. - - Attributes: - name (str): - Required. Name of the backup to delete. Values are of the - form - ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - - -class ListBackupsRequest(proto.Message): - r"""The request for - [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. - - Attributes: - parent (str): - Required. The cluster to list backups from. Values are of - the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - Use ``{cluster} = '-'`` to list backups for all clusters in - an instance, e.g., - ``projects/{project}/instances/{instance}/clusters/-``. - filter (str): - A filter expression that filters backups listed in the - response. 
The expression must specify the field name, a - comparison operator, and the value that you want to use for - filtering. The value must be a string, a number, or a - boolean. The comparison operator must be <, >, <=, >=, !=, - =, or :. Colon ':' represents a HAS operator which is - roughly synonymous with equality. Filter rules are case - insensitive. - - The fields eligible for filtering are: - - - ``name`` - - ``source_table`` - - ``state`` - - ``start_time`` (and values are of the format - YYYY-MM-DDTHH:MM:SSZ) - - ``end_time`` (and values are of the format - YYYY-MM-DDTHH:MM:SSZ) - - ``expire_time`` (and values are of the format - YYYY-MM-DDTHH:MM:SSZ) - - ``size_bytes`` - - To filter on multiple expressions, provide each separate - expression within parentheses. By default, each expression - is an AND expression. However, you can include AND, OR, and - NOT expressions explicitly. - - Some examples of using filters are: - - - ``name:"exact"`` --> The backup's name is the string - "exact". - - ``name:howl`` --> The backup's name contains the string - "howl". - - ``source_table:prod`` --> The source_table's name - contains the string "prod". - - ``state:CREATING`` --> The backup is pending creation. - - ``state:READY`` --> The backup is fully created and ready - for use. - - ``(name:howl) AND (start_time < \"2018-03-28T14:50:00Z\")`` - --> The backup name contains the string "howl" and - start_time of the backup is before 2018-03-28T14:50:00Z. - - ``size_bytes > 10000000000`` --> The backup's size is - greater than 10GB - order_by (str): - An expression for specifying the sort order of the results - of the request. The string value should specify one or more - fields in [Backup][google.bigtable.admin.v2.Backup]. The - full syntax is described at https://aip.dev/132#ordering. - - Fields supported are: - - - name - - source_table - - expire_time - - start_time - - end_time - - size_bytes - - state - - For example, "start_time". The default sorting order is - ascending. To specify descending order for the field, a - suffix " desc" should be appended to the field name. For - example, "start_time desc". Redundant space characters in - the syntax are insigificant. - - If order_by is empty, results will be sorted by - ``start_time`` in descending order starting from the most - recently created backup. - page_size (int): - Number of backups to be returned in the - response. If 0 or less, defaults to the server's - maximum allowed page size. - page_token (str): - If non-empty, ``page_token`` should contain a - [next_page_token][google.bigtable.admin.v2.ListBackupsResponse.next_page_token] - from a previous - [ListBackupsResponse][google.bigtable.admin.v2.ListBackupsResponse] - to the same ``parent`` and with the same ``filter``. - """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - filter: str = proto.Field( - proto.STRING, - number=2, - ) - order_by: str = proto.Field( - proto.STRING, - number=3, - ) - page_size: int = proto.Field( - proto.INT32, - number=4, - ) - page_token: str = proto.Field( - proto.STRING, - number=5, - ) - - -class ListBackupsResponse(proto.Message): - r"""The response for - [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. - - Attributes: - backups (MutableSequence[google.cloud.bigtable_admin_v2.types.Backup]): - The list of matching backups. - next_page_token (str): - ``next_page_token`` can be sent in a subsequent - [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups] - call to fetch more of the matching backups. 
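A minimal sketch of the filter and ordering syntax described above, assuming the generated client; the ``'-'`` cluster wildcard usage comes from the docstring and all names are placeholders.

from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient()

request = bigtable_admin_v2.types.ListBackupsRequest(
    parent="projects/my-project/instances/my-instance/clusters/-",  # '-' = all clusters
    filter="(state:READY) AND (size_bytes > 10000000000)",
    order_by="start_time desc",
)
# The pager follows next_page_token automatically.
for backup in client.list_backups(request=request):
    print(backup.name, backup.size_bytes)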
- """ - - @property - def raw_page(self): - return self - - backups: MutableSequence[gba_table.Backup] = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gba_table.Backup, - ) - next_page_token: str = proto.Field( - proto.STRING, - number=2, - ) - - -class CopyBackupRequest(proto.Message): - r"""The request for - [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup]. - - Attributes: - parent (str): - Required. The name of the destination cluster that will - contain the backup copy. The cluster must already exist. - Values are of the form: - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - backup_id (str): - Required. The id of the new backup. The ``backup_id`` along - with ``parent`` are combined as {parent}/backups/{backup_id} - to create the full backup name, of the form: - ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``. - This string must be between 1 and 50 characters in length - and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*. - source_backup (str): - Required. The source backup to be copied from. The source - backup needs to be in READY state for it to be copied. - Copying a copied backup is not allowed. Once CopyBackup is - in progress, the source backup cannot be deleted or cleaned - up on expiration until CopyBackup is finished. Values are of - the form: - ``projects//instances//clusters//backups/``. - expire_time (google.protobuf.timestamp_pb2.Timestamp): - Required. Required. The expiration time of the copied backup - with microsecond granularity that must be at least 6 hours - and at most 30 days from the time the request is received. - Once the ``expire_time`` has passed, Cloud Bigtable will - delete the backup and free the resources used by the backup. - """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - backup_id: str = proto.Field( - proto.STRING, - number=2, - ) - source_backup: str = proto.Field( - proto.STRING, - number=3, - ) - expire_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - - -class CopyBackupMetadata(proto.Message): - r"""Metadata type for the google.longrunning.Operation returned by - [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup]. - - Attributes: - name (str): - The name of the backup being created through the copy - operation. Values are of the form - ``projects//instances//clusters//backups/``. - source_backup_info (google.cloud.bigtable_admin_v2.types.BackupInfo): - Information about the source backup that is - being copied from. - progress (google.cloud.bigtable_admin_v2.types.OperationProgress): - The progress of the - [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup] - operation. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - source_backup_info: gba_table.BackupInfo = proto.Field( - proto.MESSAGE, - number=2, - message=gba_table.BackupInfo, - ) - progress: common.OperationProgress = proto.Field( - proto.MESSAGE, - number=3, - message=common.OperationProgress, - ) - - -class CreateAuthorizedViewRequest(proto.Message): - r"""The request for - [CreateAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.CreateAuthorizedView] - - Attributes: - parent (str): - Required. This is the name of the table the AuthorizedView - belongs to. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - authorized_view_id (str): - Required. The id of the AuthorizedView to create. 
This - AuthorizedView must not already exist. The - ``authorized_view_id`` appended to ``parent`` forms the full - AuthorizedView name of the form - ``projects/{project}/instances/{instance}/tables/{table}/authorizedView/{authorized_view}``. - authorized_view (google.cloud.bigtable_admin_v2.types.AuthorizedView): - Required. The AuthorizedView to create. - """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - authorized_view_id: str = proto.Field( - proto.STRING, - number=2, - ) - authorized_view: gba_table.AuthorizedView = proto.Field( - proto.MESSAGE, - number=3, - message=gba_table.AuthorizedView, - ) - - -class CreateAuthorizedViewMetadata(proto.Message): - r"""The metadata for the Operation returned by - CreateAuthorizedView. - - Attributes: - original_request (google.cloud.bigtable_admin_v2.types.CreateAuthorizedViewRequest): - The request that prompted the initiation of - this CreateAuthorizedView operation. - request_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which the original request was - received. - finish_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which the operation failed or was - completed successfully. - """ - - original_request: "CreateAuthorizedViewRequest" = proto.Field( - proto.MESSAGE, - number=1, - message="CreateAuthorizedViewRequest", - ) - request_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - finish_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - - -class ListAuthorizedViewsRequest(proto.Message): - r"""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews][google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews] - - Attributes: - parent (str): - Required. The unique name of the table for which - AuthorizedViews should be listed. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - page_size (int): - Optional. Maximum number of results per page. - - A page_size of zero lets the server choose the number of - items to return. A page_size which is strictly positive will - return at most that many items. A negative page_size will - cause an error. - - Following the first request, subsequent paginated calls are - not required to pass a page_size. If a page_size is set in - subsequent calls, it must match the page_size given in the - first request. - page_token (str): - Optional. The value of ``next_page_token`` returned by a - previous call. - view (google.cloud.bigtable_admin_v2.types.AuthorizedView.ResponseView): - Optional. The resource_view to be applied to the returned - AuthorizedViews' fields. Default to NAME_ONLY. - """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - page_size: int = proto.Field( - proto.INT32, - number=2, - ) - page_token: str = proto.Field( - proto.STRING, - number=3, - ) - view: gba_table.AuthorizedView.ResponseView = proto.Field( - proto.ENUM, - number=4, - enum=gba_table.AuthorizedView.ResponseView, - ) - - -class ListAuthorizedViewsResponse(proto.Message): - r"""Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews][google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews] - - Attributes: - authorized_views (MutableSequence[google.cloud.bigtable_admin_v2.types.AuthorizedView]): - The AuthorizedViews present in the requested - table. - next_page_token (str): - Set if not all tables could be returned in a single - response. 
Pass this value to ``page_token`` in another - request to get the next page of results. - """ - - @property - def raw_page(self): - return self - - authorized_views: MutableSequence[gba_table.AuthorizedView] = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gba_table.AuthorizedView, - ) - next_page_token: str = proto.Field( - proto.STRING, - number=2, - ) - - -class GetAuthorizedViewRequest(proto.Message): - r"""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.GetAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.GetAuthorizedView] - - Attributes: - name (str): - Required. The unique name of the requested AuthorizedView. - Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}``. - view (google.cloud.bigtable_admin_v2.types.AuthorizedView.ResponseView): - Optional. The resource_view to be applied to the returned - AuthorizedView's fields. Default to BASIC. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - view: gba_table.AuthorizedView.ResponseView = proto.Field( - proto.ENUM, - number=2, - enum=gba_table.AuthorizedView.ResponseView, - ) - - -class UpdateAuthorizedViewRequest(proto.Message): - r"""The request for - [UpdateAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.UpdateAuthorizedView]. - - Attributes: - authorized_view (google.cloud.bigtable_admin_v2.types.AuthorizedView): - Required. The AuthorizedView to update. The ``name`` in - ``authorized_view`` is used to identify the AuthorizedView. - AuthorizedView name must in this format: - ``projects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}``. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Optional. The list of fields to update. A mask specifying - which fields in the AuthorizedView resource should be - updated. This mask is relative to the AuthorizedView - resource, not to the request message. A field will be - overwritten if it is in the mask. If empty, all fields set - in the request will be overwritten. A special value ``*`` - means to overwrite all fields (including fields not set in - the request). - ignore_warnings (bool): - Optional. If true, ignore the safety checks - when updating the AuthorizedView. - """ - - authorized_view: gba_table.AuthorizedView = proto.Field( - proto.MESSAGE, - number=1, - message=gba_table.AuthorizedView, - ) - update_mask: field_mask_pb2.FieldMask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - ignore_warnings: bool = proto.Field( - proto.BOOL, - number=3, - ) - - -class UpdateAuthorizedViewMetadata(proto.Message): - r"""Metadata for the google.longrunning.Operation returned by - [UpdateAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.UpdateAuthorizedView]. - - Attributes: - original_request (google.cloud.bigtable_admin_v2.types.UpdateAuthorizedViewRequest): - The request that prompted the initiation of - this UpdateAuthorizedView operation. - request_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which the original request was - received. - finish_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which the operation failed or was - completed successfully. 
- """ - - original_request: "UpdateAuthorizedViewRequest" = proto.Field( - proto.MESSAGE, - number=1, - message="UpdateAuthorizedViewRequest", - ) - request_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - finish_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - - -class DeleteAuthorizedViewRequest(proto.Message): - r"""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.DeleteAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.DeleteAuthorizedView] - - Attributes: - name (str): - Required. The unique name of the AuthorizedView to be - deleted. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}``. - etag (str): - Optional. The current etag of the - AuthorizedView. If an etag is provided and does - not match the current etag of the - AuthorizedView, deletion will be blocked and an - ABORTED error will be returned. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - etag: str = proto.Field( - proto.STRING, - number=2, - ) - - -class CreateSchemaBundleRequest(proto.Message): - r"""The request for - [CreateSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.CreateSchemaBundle]. - - Attributes: - parent (str): - Required. The parent resource where this schema bundle will - be created. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - schema_bundle_id (str): - Required. The unique ID to use for the schema - bundle, which will become the final component of - the schema bundle's resource name. - schema_bundle (google.cloud.bigtable_admin_v2.types.SchemaBundle): - Required. The schema bundle to create. - """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - schema_bundle_id: str = proto.Field( - proto.STRING, - number=2, - ) - schema_bundle: gba_table.SchemaBundle = proto.Field( - proto.MESSAGE, - number=3, - message=gba_table.SchemaBundle, - ) - - -class CreateSchemaBundleMetadata(proto.Message): - r"""The metadata for the Operation returned by - [CreateSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.CreateSchemaBundle]. - - Attributes: - name (str): - The unique name identifying this schema bundle. Values are - of the form - ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}`` - start_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which this operation started. - end_time (google.protobuf.timestamp_pb2.Timestamp): - If set, the time at which this operation - finished or was canceled. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - start_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - end_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - - -class UpdateSchemaBundleRequest(proto.Message): - r"""The request for - [UpdateSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.UpdateSchemaBundle]. - - Attributes: - schema_bundle (google.cloud.bigtable_admin_v2.types.SchemaBundle): - Required. The schema bundle to update. - - The schema bundle's ``name`` field is used to identify the - schema bundle to update. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}`` - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Optional. 
The list of fields to update. - ignore_warnings (bool): - Optional. If set, ignore the safety checks - when updating the Schema Bundle. The safety - checks are: - - - The new Schema Bundle is backwards compatible - with the existing Schema Bundle. - """ - - schema_bundle: gba_table.SchemaBundle = proto.Field( - proto.MESSAGE, - number=1, - message=gba_table.SchemaBundle, - ) - update_mask: field_mask_pb2.FieldMask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - ignore_warnings: bool = proto.Field( - proto.BOOL, - number=3, - ) - - -class UpdateSchemaBundleMetadata(proto.Message): - r"""The metadata for the Operation returned by - [UpdateSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.UpdateSchemaBundle]. - - Attributes: - name (str): - The unique name identifying this schema bundle. Values are - of the form - ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}`` - start_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which this operation started. - end_time (google.protobuf.timestamp_pb2.Timestamp): - If set, the time at which this operation - finished or was canceled. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - start_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - end_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - - -class GetSchemaBundleRequest(proto.Message): - r"""The request for - [GetSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.GetSchemaBundle]. - - Attributes: - name (str): - Required. The unique name of the schema bundle to retrieve. - Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}`` - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - - -class ListSchemaBundlesRequest(proto.Message): - r"""The request for - [ListSchemaBundles][google.bigtable.admin.v2.BigtableTableAdmin.ListSchemaBundles]. - - Attributes: - parent (str): - Required. The parent, which owns this collection of schema - bundles. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - page_size (int): - The maximum number of schema bundles to - return. If the value is positive, the server may - return at most this value. If unspecified, the - server will return the maximum allowed page - size. - page_token (str): - A page token, received from a previous ``ListSchemaBundles`` - call. Provide this to retrieve the subsequent page. - - When paginating, all other parameters provided to - ``ListSchemaBundles`` must match the call that provided the - page token. - """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - page_size: int = proto.Field( - proto.INT32, - number=2, - ) - page_token: str = proto.Field( - proto.STRING, - number=3, - ) - - -class ListSchemaBundlesResponse(proto.Message): - r"""The response for - [ListSchemaBundles][google.bigtable.admin.v2.BigtableTableAdmin.ListSchemaBundles]. - - Attributes: - schema_bundles (MutableSequence[google.cloud.bigtable_admin_v2.types.SchemaBundle]): - The schema bundles from the specified table. - next_page_token (str): - A token, which can be sent as ``page_token`` to retrieve the - next page. If this field is omitted, there are no subsequent - pages. 
- """ - - @property - def raw_page(self): - return self - - schema_bundles: MutableSequence[gba_table.SchemaBundle] = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gba_table.SchemaBundle, - ) - next_page_token: str = proto.Field( - proto.STRING, - number=2, - ) - - -class DeleteSchemaBundleRequest(proto.Message): - r"""The request for - [DeleteSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSchemaBundle]. - - Attributes: - name (str): - Required. The unique name of the schema bundle to delete. - Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}`` - etag (str): - Optional. The etag of the schema bundle. - If this is provided, it must match the server's - etag. The server returns an ABORTED error on a - mismatched etag. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - etag: str = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/bigtable_admin_v2/types/common.py b/google/cloud/bigtable_admin_v2/types/common.py deleted file mode 100644 index 7b05e5ff5..000000000 --- a/google/cloud/bigtable_admin_v2/types/common.py +++ /dev/null @@ -1,81 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package="google.bigtable.admin.v2", - manifest={ - "StorageType", - "OperationProgress", - }, -) - - -class StorageType(proto.Enum): - r"""Storage media types for persisting Bigtable data. - - Values: - STORAGE_TYPE_UNSPECIFIED (0): - The user did not specify a storage type. - SSD (1): - Flash (SSD) storage should be used. - HDD (2): - Magnetic drive (HDD) storage should be used. - """ - STORAGE_TYPE_UNSPECIFIED = 0 - SSD = 1 - HDD = 2 - - -class OperationProgress(proto.Message): - r"""Encapsulates progress related information for a Cloud - Bigtable long running operation. - - Attributes: - progress_percent (int): - Percent completion of the operation. - Values are between 0 and 100 inclusive. - start_time (google.protobuf.timestamp_pb2.Timestamp): - Time the request was received. - end_time (google.protobuf.timestamp_pb2.Timestamp): - If set, the time at which this operation - failed or was completed successfully. 
- """ - - progress_percent: int = proto.Field( - proto.INT32, - number=1, - ) - start_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - end_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/bigtable_admin_v2/types/instance.py b/google/cloud/bigtable_admin_v2/types/instance.py deleted file mode 100644 index 2623b770e..000000000 --- a/google/cloud/bigtable_admin_v2/types/instance.py +++ /dev/null @@ -1,826 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - -from google.cloud.bigtable_admin_v2.types import common -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package="google.bigtable.admin.v2", - manifest={ - "Instance", - "AutoscalingTargets", - "AutoscalingLimits", - "Cluster", - "AppProfile", - "HotTablet", - "LogicalView", - "MaterializedView", - }, -) - - -class Instance(proto.Message): - r"""A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] - and the resources that serve them. All tables in an instance are - served from all [Clusters][google.bigtable.admin.v2.Cluster] in the - instance. - - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - name (str): - The unique name of the instance. Values are of the form - ``projects/{project}/instances/[a-z][a-z0-9\\-]+[a-z0-9]``. - display_name (str): - Required. The descriptive name for this - instance as it appears in UIs. Can be changed at - any time, but should be kept globally unique to - avoid confusion. - state (google.cloud.bigtable_admin_v2.types.Instance.State): - Output only. The current state of the - instance. - type_ (google.cloud.bigtable_admin_v2.types.Instance.Type): - The type of the instance. Defaults to ``PRODUCTION``. - labels (MutableMapping[str, str]): - Labels are a flexible and lightweight mechanism for - organizing cloud resources into groups that reflect a - customer's organizational needs and deployment strategies. - They can be used to filter resources and aggregate metrics. - - - Label keys must be between 1 and 63 characters long and - must conform to the regular expression: - ``[\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}``. - - Label values must be between 0 and 63 characters long and - must conform to the regular expression: - ``[\p{Ll}\p{Lo}\p{N}_-]{0,63}``. - - No more than 64 labels can be associated with a given - resource. - - Keys and values must both be under 128 bytes. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. A commit timestamp representing when this - Instance was created. 
For instances created before this - field was added (August 2021), this value is - ``seconds: 0, nanos: 1``. - satisfies_pzs (bool): - Output only. Reserved for future use. - - This field is a member of `oneof`_ ``_satisfies_pzs``. - satisfies_pzi (bool): - Output only. Reserved for future use. - - This field is a member of `oneof`_ ``_satisfies_pzi``. - """ - - class State(proto.Enum): - r"""Possible states of an instance. - - Values: - STATE_NOT_KNOWN (0): - The state of the instance could not be - determined. - READY (1): - The instance has been successfully created - and can serve requests to its tables. - CREATING (2): - The instance is currently being created, and - may be destroyed if the creation process - encounters an error. - """ - STATE_NOT_KNOWN = 0 - READY = 1 - CREATING = 2 - - class Type(proto.Enum): - r"""The type of the instance. - - Values: - TYPE_UNSPECIFIED (0): - The type of the instance is unspecified. If set when - creating an instance, a ``PRODUCTION`` instance will be - created. If set when updating an instance, the type will be - left unchanged. - PRODUCTION (1): - An instance meant for production use. ``serve_nodes`` must - be set on the cluster. - DEVELOPMENT (2): - DEPRECATED: Prefer PRODUCTION for all use - cases, as it no longer enforces a higher minimum - node count than DEVELOPMENT. - """ - TYPE_UNSPECIFIED = 0 - PRODUCTION = 1 - DEVELOPMENT = 2 - - name: str = proto.Field( - proto.STRING, - number=1, - ) - display_name: str = proto.Field( - proto.STRING, - number=2, - ) - state: State = proto.Field( - proto.ENUM, - number=3, - enum=State, - ) - type_: Type = proto.Field( - proto.ENUM, - number=4, - enum=Type, - ) - labels: MutableMapping[str, str] = proto.MapField( - proto.STRING, - proto.STRING, - number=5, - ) - create_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=7, - message=timestamp_pb2.Timestamp, - ) - satisfies_pzs: bool = proto.Field( - proto.BOOL, - number=8, - optional=True, - ) - satisfies_pzi: bool = proto.Field( - proto.BOOL, - number=11, - optional=True, - ) - - -class AutoscalingTargets(proto.Message): - r"""The Autoscaling targets for a Cluster. These determine the - recommended nodes. - - Attributes: - cpu_utilization_percent (int): - The cpu utilization that the Autoscaler should be trying to - achieve. This number is on a scale from 0 (no utilization) - to 100 (total utilization), and is limited between 10 and - 80, otherwise it will return INVALID_ARGUMENT error. - storage_utilization_gib_per_node (int): - The storage utilization that the Autoscaler should be trying - to achieve. This number is limited between 2560 (2.5TiB) and - 5120 (5TiB) for a SSD cluster and between 8192 (8TiB) and - 16384 (16TiB) for an HDD cluster, otherwise it will return - INVALID_ARGUMENT error. If this value is set to 0, it will - be treated as if it were set to the default value: 2560 for - SSD, 8192 for HDD. - """ - - cpu_utilization_percent: int = proto.Field( - proto.INT32, - number=2, - ) - storage_utilization_gib_per_node: int = proto.Field( - proto.INT32, - number=3, - ) - - -class AutoscalingLimits(proto.Message): - r"""Limits for the number of nodes a Cluster can autoscale - up/down to. - - Attributes: - min_serve_nodes (int): - Required. Minimum number of nodes to scale - down to. - max_serve_nodes (int): - Required. Maximum number of nodes to scale up - to. 
- """ - - min_serve_nodes: int = proto.Field( - proto.INT32, - number=1, - ) - max_serve_nodes: int = proto.Field( - proto.INT32, - number=2, - ) - - -class Cluster(proto.Message): - r"""A resizable group of nodes in a particular cloud location, capable - of serving all [Tables][google.bigtable.admin.v2.Table] in the - parent [Instance][google.bigtable.admin.v2.Instance]. - - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - name (str): - The unique name of the cluster. Values are of the form - ``projects/{project}/instances/{instance}/clusters/[a-z][-a-z0-9]*``. - location (str): - Immutable. The location where this cluster's nodes and - storage reside. For best performance, clients should be - located as close as possible to this cluster. Currently only - zones are supported, so values should be of the form - ``projects/{project}/locations/{zone}``. - state (google.cloud.bigtable_admin_v2.types.Cluster.State): - Output only. The current state of the - cluster. - serve_nodes (int): - The number of nodes in the cluster. If no - value is set, Cloud Bigtable automatically - allocates nodes based on your data footprint and - optimized for 50% storage utilization. - node_scaling_factor (google.cloud.bigtable_admin_v2.types.Cluster.NodeScalingFactor): - Immutable. The node scaling factor of this - cluster. - cluster_config (google.cloud.bigtable_admin_v2.types.Cluster.ClusterConfig): - Configuration for this cluster. - - This field is a member of `oneof`_ ``config``. - default_storage_type (google.cloud.bigtable_admin_v2.types.StorageType): - Immutable. The type of storage used by this - cluster to serve its parent instance's tables, - unless explicitly overridden. - encryption_config (google.cloud.bigtable_admin_v2.types.Cluster.EncryptionConfig): - Immutable. The encryption configuration for - CMEK-protected clusters. - """ - - class State(proto.Enum): - r"""Possible states of a cluster. - - Values: - STATE_NOT_KNOWN (0): - The state of the cluster could not be - determined. - READY (1): - The cluster has been successfully created and - is ready to serve requests. - CREATING (2): - The cluster is currently being created, and - may be destroyed if the creation process - encounters an error. A cluster may not be able - to serve requests while being created. - RESIZING (3): - The cluster is currently being resized, and - may revert to its previous node count if the - process encounters an error. A cluster is still - capable of serving requests while being resized, - but may exhibit performance as if its number of - allocated nodes is between the starting and - requested states. - DISABLED (4): - The cluster has no backing nodes. The data - (tables) still exist, but no operations can be - performed on the cluster. - """ - STATE_NOT_KNOWN = 0 - READY = 1 - CREATING = 2 - RESIZING = 3 - DISABLED = 4 - - class NodeScalingFactor(proto.Enum): - r"""Possible node scaling factors of the clusters. Node scaling - delivers better latency and more throughput by removing node - boundaries. - - Values: - NODE_SCALING_FACTOR_UNSPECIFIED (0): - No node scaling specified. Defaults to - NODE_SCALING_FACTOR_1X. - NODE_SCALING_FACTOR_1X (1): - The cluster is running with a scaling factor - of 1. - NODE_SCALING_FACTOR_2X (2): - The cluster is running with a scaling factor of 2. All node - count values must be in increments of 2 with this scaling - factor enabled, otherwise an INVALID_ARGUMENT error will be - returned. 
- """ - NODE_SCALING_FACTOR_UNSPECIFIED = 0 - NODE_SCALING_FACTOR_1X = 1 - NODE_SCALING_FACTOR_2X = 2 - - class ClusterAutoscalingConfig(proto.Message): - r"""Autoscaling config for a cluster. - - Attributes: - autoscaling_limits (google.cloud.bigtable_admin_v2.types.AutoscalingLimits): - Required. Autoscaling limits for this - cluster. - autoscaling_targets (google.cloud.bigtable_admin_v2.types.AutoscalingTargets): - Required. Autoscaling targets for this - cluster. - """ - - autoscaling_limits: "AutoscalingLimits" = proto.Field( - proto.MESSAGE, - number=1, - message="AutoscalingLimits", - ) - autoscaling_targets: "AutoscalingTargets" = proto.Field( - proto.MESSAGE, - number=2, - message="AutoscalingTargets", - ) - - class ClusterConfig(proto.Message): - r"""Configuration for a cluster. - - Attributes: - cluster_autoscaling_config (google.cloud.bigtable_admin_v2.types.Cluster.ClusterAutoscalingConfig): - Autoscaling configuration for this cluster. - """ - - cluster_autoscaling_config: "Cluster.ClusterAutoscalingConfig" = proto.Field( - proto.MESSAGE, - number=1, - message="Cluster.ClusterAutoscalingConfig", - ) - - class EncryptionConfig(proto.Message): - r"""Cloud Key Management Service (Cloud KMS) settings for a - CMEK-protected cluster. - - Attributes: - kms_key_name (str): - Describes the Cloud KMS encryption key that will be used to - protect the destination Bigtable cluster. The requirements - for this key are: - - 1) The Cloud Bigtable service account associated with the - project that contains this cluster must be granted the - ``cloudkms.cryptoKeyEncrypterDecrypter`` role on the CMEK - key. - 2) Only regional keys can be used and the region of the CMEK - key must match the region of the cluster. Values are of - the form - ``projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{key}`` - """ - - kms_key_name: str = proto.Field( - proto.STRING, - number=1, - ) - - name: str = proto.Field( - proto.STRING, - number=1, - ) - location: str = proto.Field( - proto.STRING, - number=2, - ) - state: State = proto.Field( - proto.ENUM, - number=3, - enum=State, - ) - serve_nodes: int = proto.Field( - proto.INT32, - number=4, - ) - node_scaling_factor: NodeScalingFactor = proto.Field( - proto.ENUM, - number=9, - enum=NodeScalingFactor, - ) - cluster_config: ClusterConfig = proto.Field( - proto.MESSAGE, - number=7, - oneof="config", - message=ClusterConfig, - ) - default_storage_type: common.StorageType = proto.Field( - proto.ENUM, - number=5, - enum=common.StorageType, - ) - encryption_config: EncryptionConfig = proto.Field( - proto.MESSAGE, - number=6, - message=EncryptionConfig, - ) - - -class AppProfile(proto.Message): - r"""A configuration object describing how Cloud Bigtable should - treat traffic from a particular end user application. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - name (str): - The unique name of the app profile. Values are of the form - ``projects/{project}/instances/{instance}/appProfiles/[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - etag (str): - Strongly validated etag for optimistic concurrency control. - Preserve the value returned from ``GetAppProfile`` when - calling ``UpdateAppProfile`` to fail the request if there - has been a modification in the mean time. 
The - ``update_mask`` of the request need not include ``etag`` for - this protection to apply. See - `Wikipedia `__ and - `RFC - 7232 `__ - for more details. - description (str): - Long form description of the use case for - this AppProfile. - multi_cluster_routing_use_any (google.cloud.bigtable_admin_v2.types.AppProfile.MultiClusterRoutingUseAny): - Use a multi-cluster routing policy. - - This field is a member of `oneof`_ ``routing_policy``. - single_cluster_routing (google.cloud.bigtable_admin_v2.types.AppProfile.SingleClusterRouting): - Use a single-cluster routing policy. - - This field is a member of `oneof`_ ``routing_policy``. - priority (google.cloud.bigtable_admin_v2.types.AppProfile.Priority): - This field has been deprecated in favor of - ``standard_isolation.priority``. If you set this field, - ``standard_isolation.priority`` will be set instead. - - The priority of requests sent using this app profile. - - This field is a member of `oneof`_ ``isolation``. - standard_isolation (google.cloud.bigtable_admin_v2.types.AppProfile.StandardIsolation): - The standard options used for isolating this - app profile's traffic from other use cases. - - This field is a member of `oneof`_ ``isolation``. - data_boost_isolation_read_only (google.cloud.bigtable_admin_v2.types.AppProfile.DataBoostIsolationReadOnly): - Specifies that this app profile is intended - for read-only usage via the Data Boost feature. - - This field is a member of `oneof`_ ``isolation``. - """ - - class Priority(proto.Enum): - r"""Possible priorities for an app profile. Note that higher - priority writes can sometimes queue behind lower priority writes - to the same tablet, as writes must be strictly sequenced in the - durability log. - - Values: - PRIORITY_UNSPECIFIED (0): - Default value. Mapped to PRIORITY_HIGH (the legacy behavior) - on creation. - PRIORITY_LOW (1): - No description available. - PRIORITY_MEDIUM (2): - No description available. - PRIORITY_HIGH (3): - No description available. - """ - PRIORITY_UNSPECIFIED = 0 - PRIORITY_LOW = 1 - PRIORITY_MEDIUM = 2 - PRIORITY_HIGH = 3 - - class MultiClusterRoutingUseAny(proto.Message): - r"""Read/write requests are routed to the nearest cluster in the - instance, and will fail over to the nearest cluster that is - available in the event of transient errors or delays. Clusters - in a region are considered equidistant. Choosing this option - sacrifices read-your-writes consistency to improve availability. - - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - cluster_ids (MutableSequence[str]): - The set of clusters to route to. The order is - ignored; clusters will be tried in order of - distance. If left empty, all clusters are - eligible. - row_affinity (google.cloud.bigtable_admin_v2.types.AppProfile.MultiClusterRoutingUseAny.RowAffinity): - Row affinity sticky routing based on the row - key of the request. Requests that span multiple - rows are routed non-deterministically. - - This field is a member of `oneof`_ ``affinity``. - """ - - class RowAffinity(proto.Message): - r"""If enabled, Bigtable will route the request based on the row - key of the request, rather than randomly. Instead, each row key - will be assigned to a cluster, and will stick to that cluster. - If clusters are added or removed, then this may affect which row - keys stick to which clusters. To avoid this, users can use a - cluster group to specify which clusters are to be used. 
In this - case, new clusters that are not a part of the cluster group will - not be routed to, and routing will be unaffected by the new - cluster. Moreover, clusters specified in the cluster group - cannot be deleted unless removed from the cluster group. - - """ - - cluster_ids: MutableSequence[str] = proto.RepeatedField( - proto.STRING, - number=1, - ) - row_affinity: "AppProfile.MultiClusterRoutingUseAny.RowAffinity" = proto.Field( - proto.MESSAGE, - number=3, - oneof="affinity", - message="AppProfile.MultiClusterRoutingUseAny.RowAffinity", - ) - - class SingleClusterRouting(proto.Message): - r"""Unconditionally routes all read/write requests to a specific - cluster. This option preserves read-your-writes consistency but - does not improve availability. - - Attributes: - cluster_id (str): - The cluster to which read/write requests - should be routed. - allow_transactional_writes (bool): - Whether or not ``CheckAndMutateRow`` and - ``ReadModifyWriteRow`` requests are allowed by this app - profile. It is unsafe to send these requests to the same - table/row/column in multiple clusters. - """ - - cluster_id: str = proto.Field( - proto.STRING, - number=1, - ) - allow_transactional_writes: bool = proto.Field( - proto.BOOL, - number=2, - ) - - class StandardIsolation(proto.Message): - r"""Standard options for isolating this app profile's traffic - from other use cases. - - Attributes: - priority (google.cloud.bigtable_admin_v2.types.AppProfile.Priority): - The priority of requests sent using this app - profile. - """ - - priority: "AppProfile.Priority" = proto.Field( - proto.ENUM, - number=1, - enum="AppProfile.Priority", - ) - - class DataBoostIsolationReadOnly(proto.Message): - r"""Data Boost is a serverless compute capability that lets you - run high-throughput read jobs and queries on your Bigtable data, - without impacting the performance of the clusters that handle - your application traffic. Data Boost supports read-only use - cases with single-cluster routing. - - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - compute_billing_owner (google.cloud.bigtable_admin_v2.types.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner): - The Compute Billing Owner for this Data Boost - App Profile. - - This field is a member of `oneof`_ ``_compute_billing_owner``. - """ - - class ComputeBillingOwner(proto.Enum): - r"""Compute Billing Owner specifies how usage should be accounted - when using Data Boost. Compute Billing Owner also configures - which Cloud Project is charged for relevant quota. - - Values: - COMPUTE_BILLING_OWNER_UNSPECIFIED (0): - Unspecified value. - HOST_PAYS (1): - The host Cloud Project containing the - targeted Bigtable Instance / Table pays for - compute. 
- """ - COMPUTE_BILLING_OWNER_UNSPECIFIED = 0 - HOST_PAYS = 1 - - compute_billing_owner: "AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner" = proto.Field( - proto.ENUM, - number=1, - optional=True, - enum="AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner", - ) - - name: str = proto.Field( - proto.STRING, - number=1, - ) - etag: str = proto.Field( - proto.STRING, - number=2, - ) - description: str = proto.Field( - proto.STRING, - number=3, - ) - multi_cluster_routing_use_any: MultiClusterRoutingUseAny = proto.Field( - proto.MESSAGE, - number=5, - oneof="routing_policy", - message=MultiClusterRoutingUseAny, - ) - single_cluster_routing: SingleClusterRouting = proto.Field( - proto.MESSAGE, - number=6, - oneof="routing_policy", - message=SingleClusterRouting, - ) - priority: Priority = proto.Field( - proto.ENUM, - number=7, - oneof="isolation", - enum=Priority, - ) - standard_isolation: StandardIsolation = proto.Field( - proto.MESSAGE, - number=11, - oneof="isolation", - message=StandardIsolation, - ) - data_boost_isolation_read_only: DataBoostIsolationReadOnly = proto.Field( - proto.MESSAGE, - number=10, - oneof="isolation", - message=DataBoostIsolationReadOnly, - ) - - -class HotTablet(proto.Message): - r"""A tablet is a defined by a start and end key and is explained - in https://cloud.google.com/bigtable/docs/overview#architecture - and - https://cloud.google.com/bigtable/docs/performance#optimization. - A Hot tablet is a tablet that exhibits high average cpu usage - during the time interval from start time to end time. - - Attributes: - name (str): - The unique name of the hot tablet. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/hotTablets/[a-zA-Z0-9_-]*``. - table_name (str): - Name of the table that contains the tablet. Values are of - the form - ``projects/{project}/instances/{instance}/tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - start_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. The start time of the hot - tablet. - end_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. The end time of the hot tablet. - start_key (str): - Tablet Start Key (inclusive). - end_key (str): - Tablet End Key (inclusive). - node_cpu_usage_percent (float): - Output only. The average CPU usage spent by a node on this - tablet over the start_time to end_time time range. The - percentage is the amount of CPU used by the node to serve - the tablet, from 0% (tablet was not interacted with) to 100% - (the node spent all cycles serving the hot tablet). - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - table_name: str = proto.Field( - proto.STRING, - number=2, - ) - start_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - end_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - start_key: str = proto.Field( - proto.STRING, - number=5, - ) - end_key: str = proto.Field( - proto.STRING, - number=6, - ) - node_cpu_usage_percent: float = proto.Field( - proto.FLOAT, - number=7, - ) - - -class LogicalView(proto.Message): - r"""A SQL logical view object that can be referenced in SQL - queries. - - Attributes: - name (str): - Identifier. The unique name of the logical view. Format: - ``projects/{project}/instances/{instance}/logicalViews/{logical_view}`` - query (str): - Required. The logical view's select query. - etag (str): - Optional. The etag for this logical view. 
- This may be sent on update requests to ensure - that the client has an up-to-date value before - proceeding. The server returns an ABORTED error - on a mismatched etag. - deletion_protection (bool): - Optional. Set to true to make the LogicalView - protected against deletion. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - query: str = proto.Field( - proto.STRING, - number=2, - ) - etag: str = proto.Field( - proto.STRING, - number=3, - ) - deletion_protection: bool = proto.Field( - proto.BOOL, - number=6, - ) - - -class MaterializedView(proto.Message): - r"""A materialized view object that can be referenced in SQL - queries. - - Attributes: - name (str): - Identifier. The unique name of the materialized view. - Format: - ``projects/{project}/instances/{instance}/materializedViews/{materialized_view}`` - query (str): - Required. Immutable. The materialized view's - select query. - etag (str): - Optional. The etag for this materialized - view. This may be sent on update requests to - ensure that the client has an up-to-date value - before proceeding. The server returns an ABORTED - error on a mismatched etag. - deletion_protection (bool): - Set to true to make the MaterializedView - protected against deletion. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - query: str = proto.Field( - proto.STRING, - number=2, - ) - etag: str = proto.Field( - proto.STRING, - number=3, - ) - deletion_protection: bool = proto.Field( - proto.BOOL, - number=6, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/bigtable_admin_v2/types/oneof_message.py b/google/cloud/bigtable_admin_v2/types/oneof_message.py deleted file mode 100644 index 0bb7cf551..000000000 --- a/google/cloud/bigtable_admin_v2/types/oneof_message.py +++ /dev/null @@ -1,111 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# -# AUTOGENERATED FILE: DO NOT EDIT. The source of truth for this file is under -# the gapic_templates directory, not this one. - -import collections.abc -import proto - - -class OneofMessage(proto.Message): - def _get_oneof_field_from_key(self, key): - """Given a field name, return the corresponding oneof associated with it. If it doesn't exist, return None.""" - - oneof_type = None - - try: - oneof_type = self._meta.fields[key].oneof - except KeyError: - # Underscores may be appended to field names - # that collide with python or proto-plus keywords. - # In case a key only exists with a `_` suffix, coerce the key - # to include the `_` suffix. It's not possible to - # natively define the same field with a trailing underscore in protobuf. 
- # See related issue - # https://github.com/googleapis/python-api-core/issues/227 - if f"{key}_" in self._meta.fields: - key = f"{key}_" - oneof_type = self._meta.fields[key].oneof - - return oneof_type - - def __init__( - self, - mapping=None, - *, - ignore_unknown_fields=False, - **kwargs, - ): - # We accept several things for `mapping`: - # * An instance of this class. - # * An instance of the underlying protobuf descriptor class. - # * A dict - # * Nothing (keyword arguments only). - # - # - # Check for oneofs collisions in the parameters provided. Extract a set of - # all fields that are set from the mappings + kwargs combined. - mapping_fields = set(kwargs.keys()) - - if mapping is None: - pass - elif isinstance(mapping, collections.abc.Mapping): - mapping_fields.update(mapping.keys()) - elif isinstance(mapping, self._meta.pb): - mapping_fields.update(field.name for field, _ in mapping.ListFields()) - elif isinstance(mapping, type(self)): - mapping_fields.update(field.name for field, _ in mapping._pb.ListFields()) - else: - # Sanity check: Did we get something not a map? Error if so. - raise TypeError( - "Invalid constructor input for %s: %r" - % ( - self.__class__.__name__, - mapping, - ) - ) - - oneofs = set() - - for field in mapping_fields: - oneof_field = self._get_oneof_field_from_key(field) - if oneof_field is not None: - if oneof_field in oneofs: - raise ValueError( - "Invalid constructor input for %s: Multiple fields defined for oneof %s" - % (self.__class__.__name__, oneof_field) - ) - else: - oneofs.add(oneof_field) - - super().__init__(mapping, ignore_unknown_fields=ignore_unknown_fields, **kwargs) - - def __setattr__(self, key, value): - # Oneof check: Only set the value of an existing oneof field - # if the field being overridden is the same as the field already set - # for the oneof. - oneof = self._get_oneof_field_from_key(key) - if ( - oneof is not None - and self._pb.HasField(oneof) - and self._pb.WhichOneof(oneof) != key - ): - raise ValueError( - "Overriding the field set for oneof %s with a different field %s" - % (oneof, key) - ) - super().__setattr__(key, value) diff --git a/google/cloud/bigtable_admin_v2/types/table.py b/google/cloud/bigtable_admin_v2/types/table.py deleted file mode 100644 index ec5d74d36..000000000 --- a/google/cloud/bigtable_admin_v2/types/table.py +++ /dev/null @@ -1,1114 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - -from google.cloud.bigtable_admin_v2.types import types -from google.cloud.bigtable_admin_v2.types import oneof_message -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore - - -__protobuf__ = proto.module( - package="google.bigtable.admin.v2", - manifest={ - "RestoreSourceType", - "RestoreInfo", - "ChangeStreamConfig", - "Table", - "AuthorizedView", - "ColumnFamily", - "GcRule", - "EncryptionInfo", - "Snapshot", - "Backup", - "BackupInfo", - "ProtoSchema", - "SchemaBundle", - }, -) - - -class RestoreSourceType(proto.Enum): - r"""Indicates the type of the restore source. - - Values: - RESTORE_SOURCE_TYPE_UNSPECIFIED (0): - No restore associated. - BACKUP (1): - A backup was used as the source of the - restore. - """ - RESTORE_SOURCE_TYPE_UNSPECIFIED = 0 - BACKUP = 1 - - -class RestoreInfo(proto.Message): - r"""Information about a table restore. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - source_type (google.cloud.bigtable_admin_v2.types.RestoreSourceType): - The type of the restore source. - backup_info (google.cloud.bigtable_admin_v2.types.BackupInfo): - Information about the backup used to restore - the table. The backup may no longer exist. - - This field is a member of `oneof`_ ``source_info``. - """ - - source_type: "RestoreSourceType" = proto.Field( - proto.ENUM, - number=1, - enum="RestoreSourceType", - ) - backup_info: "BackupInfo" = proto.Field( - proto.MESSAGE, - number=2, - oneof="source_info", - message="BackupInfo", - ) - - -class ChangeStreamConfig(proto.Message): - r"""Change stream configuration. - - Attributes: - retention_period (google.protobuf.duration_pb2.Duration): - How long the change stream should be - retained. Change stream data older than the - retention period will not be returned when - reading the change stream from the table. - Values must be at least 1 day and at most 7 - days, and will be truncated to microsecond - granularity. - """ - - retention_period: duration_pb2.Duration = proto.Field( - proto.MESSAGE, - number=1, - message=duration_pb2.Duration, - ) - - -class Table(proto.Message): - r"""A collection of user data indexed by row, column, and - timestamp. Each table is served using the resources of its - parent cluster. - - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - name (str): - The unique name of the table. Values are of the form - ``projects/{project}/instances/{instance}/tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - Views: ``NAME_ONLY``, ``SCHEMA_VIEW``, ``REPLICATION_VIEW``, - ``FULL`` - cluster_states (MutableMapping[str, google.cloud.bigtable_admin_v2.types.Table.ClusterState]): - Output only. Map from cluster ID to per-cluster table state. - If it could not be determined whether or not the table has - data in a particular cluster (for example, if its zone is - unavailable), then there will be an entry for the cluster - with UNKNOWN ``replication_status``. Views: - ``REPLICATION_VIEW``, ``ENCRYPTION_VIEW``, ``FULL`` - column_families (MutableMapping[str, google.cloud.bigtable_admin_v2.types.ColumnFamily]): - The column families configured for this table, mapped by - column family ID. 
Views: ``SCHEMA_VIEW``, ``STATS_VIEW``, - ``FULL`` - granularity (google.cloud.bigtable_admin_v2.types.Table.TimestampGranularity): - Immutable. The granularity (i.e. ``MILLIS``) at which - timestamps are stored in this table. Timestamps not matching - the granularity will be rejected. If unspecified at creation - time, the value will be set to ``MILLIS``. Views: - ``SCHEMA_VIEW``, ``FULL``. - restore_info (google.cloud.bigtable_admin_v2.types.RestoreInfo): - Output only. If this table was restored from - another data source (e.g. a backup), this field - will be populated with information about the - restore. - change_stream_config (google.cloud.bigtable_admin_v2.types.ChangeStreamConfig): - If specified, enable the change stream on - this table. Otherwise, the change stream is - disabled and the change stream is not retained. - deletion_protection (bool): - Set to true to make the table protected against data loss. - i.e. deleting the following resources through Admin APIs are - prohibited: - - - The table. - - The column families in the table. - - The instance containing the table. - - Note one can still delete the data stored in the table - through Data APIs. - automated_backup_policy (google.cloud.bigtable_admin_v2.types.Table.AutomatedBackupPolicy): - If specified, automated backups are enabled - for this table. Otherwise, automated backups are - disabled. - - This field is a member of `oneof`_ ``automated_backup_config``. - row_key_schema (google.cloud.bigtable_admin_v2.types.Type.Struct): - The row key schema for this table. The schema is used to - decode the raw row key bytes into a structured format. The - order of field declarations in this schema is important, as - it reflects how the raw row key bytes are structured. - Currently, this only affects how the key is read via a - GoogleSQL query from the ExecuteQuery API. - - For a SQL query, the \_key column is still read as raw - bytes. But queries can reference the key fields by name, - which will be decoded from \_key using provided type and - encoding. Queries that reference key fields will fail if - they encounter an invalid row key. - - For example, if \_key = - "some_id#2024-04-30#\x00\x13\x00\xf3" with the following - schema: - - .. code-block:: - - { - fields { - field_name: "id" - type { string { encoding: utf8_bytes {} } } - } - fields { - field_name: "date" - type { string { encoding: utf8_bytes {} } } - } - fields { - field_name: "product_code" - type { int64 { encoding: big_endian_bytes {} } } - } - encoding { delimited_bytes { delimiter: "#" } } - } - - The decoded key parts would be: - id = "some_id", date = "2024-04-30", product_code = 1245427 - The query "SELECT \_key, product_code FROM table" will return - two columns: - - +========================================+==============+ - | \_key | product_code | - +========================================+==============+ - | "some_id#2024-04-30#\x00\x13\x00\xf3" | 1245427 | - +----------------------------------------+--------------+ - - The schema has the following invariants: (1) The decoded - field values are order-preserved. For read, the field values - will be decoded in sorted mode from the raw bytes. (2) Every - field in the schema must specify a non-empty name. (3) Every - field must specify a type with an associated encoding. The - type is limited to scalar types only: Array, Map, Aggregate, - and Struct are not allowed. (4) The field names must not - collide with existing column family names and reserved - keywords "_key" and "_timestamp". 
- - The following update operations are allowed for - row_key_schema: - - - Update from an empty schema to a new schema. - - Remove the existing schema. This operation requires - setting the ``ignore_warnings`` flag to ``true``, since - it might be a backward incompatible change. Without the - flag, the update request will fail with an - INVALID_ARGUMENT error. Any other row key schema update - operation (e.g. update existing schema columns names or - types) is currently unsupported. - """ - - class TimestampGranularity(proto.Enum): - r"""Possible timestamp granularities to use when keeping multiple - versions of data in a table. - - Values: - TIMESTAMP_GRANULARITY_UNSPECIFIED (0): - The user did not specify a granularity. - Should not be returned. When specified during - table creation, MILLIS will be used. - MILLIS (1): - The table keeps data versioned at a - granularity of 1ms. - """ - TIMESTAMP_GRANULARITY_UNSPECIFIED = 0 - MILLIS = 1 - - class View(proto.Enum): - r"""Defines a view over a table's fields. - - Values: - VIEW_UNSPECIFIED (0): - Uses the default view for each method as - documented in its request. - NAME_ONLY (1): - Only populates ``name``. - SCHEMA_VIEW (2): - Only populates ``name`` and fields related to the table's - schema. - REPLICATION_VIEW (3): - Only populates ``name`` and fields related to the table's - replication state. - ENCRYPTION_VIEW (5): - Only populates ``name`` and fields related to the table's - encryption state. - FULL (4): - Populates all fields. - """ - VIEW_UNSPECIFIED = 0 - NAME_ONLY = 1 - SCHEMA_VIEW = 2 - REPLICATION_VIEW = 3 - ENCRYPTION_VIEW = 5 - FULL = 4 - - class ClusterState(proto.Message): - r"""The state of a table's data in a particular cluster. - - Attributes: - replication_state (google.cloud.bigtable_admin_v2.types.Table.ClusterState.ReplicationState): - Output only. The state of replication for the - table in this cluster. - encryption_info (MutableSequence[google.cloud.bigtable_admin_v2.types.EncryptionInfo]): - Output only. The encryption information for - the table in this cluster. If the encryption key - protecting this resource is customer managed, - then its version can be rotated in Cloud Key - Management Service (Cloud KMS). The primary - version of the key and its status will be - reflected here when changes propagate from Cloud - KMS. - """ - - class ReplicationState(proto.Enum): - r"""Table replication states. - - Values: - STATE_NOT_KNOWN (0): - The replication state of the table is unknown - in this cluster. - INITIALIZING (1): - The cluster was recently created, and the - table must finish copying over pre-existing data - from other clusters before it can begin - receiving live replication updates and serving - Data API requests. - PLANNED_MAINTENANCE (2): - The table is temporarily unable to serve Data - API requests from this cluster due to planned - internal maintenance. - UNPLANNED_MAINTENANCE (3): - The table is temporarily unable to serve Data - API requests from this cluster due to unplanned - or emergency maintenance. - READY (4): - The table can serve Data API requests from - this cluster. Depending on replication delay, - reads may not immediately reflect the state of - the table in other clusters. - READY_OPTIMIZING (5): - The table is fully created and ready for use after a - restore, and is being optimized for performance. When - optimizations are complete, the table will transition to - ``READY`` state. 
- """ - STATE_NOT_KNOWN = 0 - INITIALIZING = 1 - PLANNED_MAINTENANCE = 2 - UNPLANNED_MAINTENANCE = 3 - READY = 4 - READY_OPTIMIZING = 5 - - replication_state: "Table.ClusterState.ReplicationState" = proto.Field( - proto.ENUM, - number=1, - enum="Table.ClusterState.ReplicationState", - ) - encryption_info: MutableSequence["EncryptionInfo"] = proto.RepeatedField( - proto.MESSAGE, - number=2, - message="EncryptionInfo", - ) - - class AutomatedBackupPolicy(proto.Message): - r"""Defines an automated backup policy for a table - - Attributes: - retention_period (google.protobuf.duration_pb2.Duration): - Required. How long the automated backups - should be retained. The only supported value at - this time is 3 days. - frequency (google.protobuf.duration_pb2.Duration): - Required. How frequently automated backups - should occur. The only supported value at this - time is 24 hours. - """ - - retention_period: duration_pb2.Duration = proto.Field( - proto.MESSAGE, - number=1, - message=duration_pb2.Duration, - ) - frequency: duration_pb2.Duration = proto.Field( - proto.MESSAGE, - number=2, - message=duration_pb2.Duration, - ) - - name: str = proto.Field( - proto.STRING, - number=1, - ) - cluster_states: MutableMapping[str, ClusterState] = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=2, - message=ClusterState, - ) - column_families: MutableMapping[str, "ColumnFamily"] = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=3, - message="ColumnFamily", - ) - granularity: TimestampGranularity = proto.Field( - proto.ENUM, - number=4, - enum=TimestampGranularity, - ) - restore_info: "RestoreInfo" = proto.Field( - proto.MESSAGE, - number=6, - message="RestoreInfo", - ) - change_stream_config: "ChangeStreamConfig" = proto.Field( - proto.MESSAGE, - number=8, - message="ChangeStreamConfig", - ) - deletion_protection: bool = proto.Field( - proto.BOOL, - number=9, - ) - automated_backup_policy: AutomatedBackupPolicy = proto.Field( - proto.MESSAGE, - number=13, - oneof="automated_backup_config", - message=AutomatedBackupPolicy, - ) - row_key_schema: types.Type.Struct = proto.Field( - proto.MESSAGE, - number=15, - message=types.Type.Struct, - ) - - -class AuthorizedView(proto.Message): - r"""AuthorizedViews represent subsets of a particular Cloud - Bigtable table. Users can configure access to each Authorized - View independently from the table and use the existing Data APIs - to access the subset of data. - - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - name (str): - Identifier. The name of this AuthorizedView. Values are of - the form - ``projects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}`` - subset_view (google.cloud.bigtable_admin_v2.types.AuthorizedView.SubsetView): - An AuthorizedView permitting access to an - explicit subset of a Table. - - This field is a member of `oneof`_ ``authorized_view``. - etag (str): - The etag for this AuthorizedView. - If this is provided on update, it must match the - server's etag. The server returns ABORTED error - on a mismatched etag. - deletion_protection (bool): - Set to true to make the AuthorizedView - protected against deletion. The parent Table and - containing Instance cannot be deleted if an - AuthorizedView has this bit set. - """ - - class ResponseView(proto.Enum): - r"""Defines a subset of an AuthorizedView's fields. 
- - Values: - RESPONSE_VIEW_UNSPECIFIED (0): - Uses the default view for each method as - documented in the request. - NAME_ONLY (1): - Only populates ``name``. - BASIC (2): - Only populates the AuthorizedView's basic metadata. This - includes: name, deletion_protection, etag. - FULL (3): - Populates every fields. - """ - RESPONSE_VIEW_UNSPECIFIED = 0 - NAME_ONLY = 1 - BASIC = 2 - FULL = 3 - - class FamilySubsets(proto.Message): - r"""Subsets of a column family that are included in this - AuthorizedView. - - Attributes: - qualifiers (MutableSequence[bytes]): - Individual exact column qualifiers to be - included in the AuthorizedView. - qualifier_prefixes (MutableSequence[bytes]): - Prefixes for qualifiers to be included in the - AuthorizedView. Every qualifier starting with - one of these prefixes is included in the - AuthorizedView. To provide access to all - qualifiers, include the empty string as a prefix - (""). - """ - - qualifiers: MutableSequence[bytes] = proto.RepeatedField( - proto.BYTES, - number=1, - ) - qualifier_prefixes: MutableSequence[bytes] = proto.RepeatedField( - proto.BYTES, - number=2, - ) - - class SubsetView(proto.Message): - r"""Defines a simple AuthorizedView that is a subset of the - underlying Table. - - Attributes: - row_prefixes (MutableSequence[bytes]): - Row prefixes to be included in the - AuthorizedView. To provide access to all rows, - include the empty string as a prefix (""). - family_subsets (MutableMapping[str, google.cloud.bigtable_admin_v2.types.AuthorizedView.FamilySubsets]): - Map from column family name to the columns in - this family to be included in the - AuthorizedView. - """ - - row_prefixes: MutableSequence[bytes] = proto.RepeatedField( - proto.BYTES, - number=1, - ) - family_subsets: MutableMapping[ - str, "AuthorizedView.FamilySubsets" - ] = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=2, - message="AuthorizedView.FamilySubsets", - ) - - name: str = proto.Field( - proto.STRING, - number=1, - ) - subset_view: SubsetView = proto.Field( - proto.MESSAGE, - number=2, - oneof="authorized_view", - message=SubsetView, - ) - etag: str = proto.Field( - proto.STRING, - number=3, - ) - deletion_protection: bool = proto.Field( - proto.BOOL, - number=4, - ) - - -class ColumnFamily(proto.Message): - r"""A set of columns within a table which share a common - configuration. - - Attributes: - gc_rule (google.cloud.bigtable_admin_v2.types.GcRule): - Garbage collection rule specified as a - protobuf. Must serialize to at most 500 bytes. - - NOTE: Garbage collection executes - opportunistically in the background, and so it's - possible for reads to return a cell even if it - matches the active GC expression for its family. - value_type (google.cloud.bigtable_admin_v2.types.Type): - The type of data stored in each of this family's cell - values, including its full encoding. If omitted, the family - only serves raw untyped bytes. - - For now, only the ``Aggregate`` type is supported. - - ``Aggregate`` can only be set at family creation and is - immutable afterwards. - - If ``value_type`` is ``Aggregate``, written data must be - compatible with: - - - ``value_type.input_type`` for ``AddInput`` mutations - """ - - gc_rule: "GcRule" = proto.Field( - proto.MESSAGE, - number=1, - message="GcRule", - ) - value_type: types.Type = proto.Field( - proto.MESSAGE, - number=3, - message=types.Type, - ) - - -class GcRule(oneof_message.OneofMessage): - r"""Rule for determining which cells to delete during garbage - collection. 
- - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - max_num_versions (int): - Delete all cells in a column except the most - recent N. - - This field is a member of `oneof`_ ``rule``. - max_age (google.protobuf.duration_pb2.Duration): - Delete cells in a column older than the given - age. Values must be at least one millisecond, - and will be truncated to microsecond - granularity. - - This field is a member of `oneof`_ ``rule``. - intersection (google.cloud.bigtable_admin_v2.types.GcRule.Intersection): - Delete cells that would be deleted by every - nested rule. - - This field is a member of `oneof`_ ``rule``. - union (google.cloud.bigtable_admin_v2.types.GcRule.Union): - Delete cells that would be deleted by any - nested rule. - - This field is a member of `oneof`_ ``rule``. - """ - - class Intersection(proto.Message): - r"""A GcRule which deletes cells matching all of the given rules. - - Attributes: - rules (MutableSequence[google.cloud.bigtable_admin_v2.types.GcRule]): - Only delete cells which would be deleted by every element of - ``rules``. - """ - - rules: MutableSequence["GcRule"] = proto.RepeatedField( - proto.MESSAGE, - number=1, - message="GcRule", - ) - - class Union(proto.Message): - r"""A GcRule which deletes cells matching any of the given rules. - - Attributes: - rules (MutableSequence[google.cloud.bigtable_admin_v2.types.GcRule]): - Delete cells which would be deleted by any element of - ``rules``. - """ - - rules: MutableSequence["GcRule"] = proto.RepeatedField( - proto.MESSAGE, - number=1, - message="GcRule", - ) - - max_num_versions: int = proto.Field( - proto.INT32, - number=1, - oneof="rule", - ) - max_age: duration_pb2.Duration = proto.Field( - proto.MESSAGE, - number=2, - oneof="rule", - message=duration_pb2.Duration, - ) - intersection: Intersection = proto.Field( - proto.MESSAGE, - number=3, - oneof="rule", - message=Intersection, - ) - union: Union = proto.Field( - proto.MESSAGE, - number=4, - oneof="rule", - message=Union, - ) - - -class EncryptionInfo(proto.Message): - r"""Encryption information for a given resource. - If this resource is protected with customer managed encryption, - the in-use Cloud Key Management Service (Cloud KMS) key version - is specified along with its status. - - Attributes: - encryption_type (google.cloud.bigtable_admin_v2.types.EncryptionInfo.EncryptionType): - Output only. The type of encryption used to - protect this resource. - encryption_status (google.rpc.status_pb2.Status): - Output only. The status of encrypt/decrypt - calls on underlying data for this resource. - Regardless of status, the existing data is - always encrypted at rest. - kms_key_version (str): - Output only. The version of the Cloud KMS key - specified in the parent cluster that is in use - for the data underlying this table. - """ - - class EncryptionType(proto.Enum): - r"""Possible encryption types for a resource. - - Values: - ENCRYPTION_TYPE_UNSPECIFIED (0): - Encryption type was not specified, though - data at rest remains encrypted. - GOOGLE_DEFAULT_ENCRYPTION (1): - The data backing this resource is encrypted - at rest with a key that is fully managed by - Google. No key version or status will be - populated. This is the default state. 
- CUSTOMER_MANAGED_ENCRYPTION (2): - The data backing this resource is encrypted at rest with a - key that is managed by the customer. The in-use version of - the key and its status are populated for CMEK-protected - tables. CMEK-protected backups are pinned to the key version - that was in use at the time the backup was taken. This key - version is populated but its status is not tracked and is - reported as ``UNKNOWN``. - """ - ENCRYPTION_TYPE_UNSPECIFIED = 0 - GOOGLE_DEFAULT_ENCRYPTION = 1 - CUSTOMER_MANAGED_ENCRYPTION = 2 - - encryption_type: EncryptionType = proto.Field( - proto.ENUM, - number=3, - enum=EncryptionType, - ) - encryption_status: status_pb2.Status = proto.Field( - proto.MESSAGE, - number=4, - message=status_pb2.Status, - ) - kms_key_version: str = proto.Field( - proto.STRING, - number=2, - ) - - -class Snapshot(proto.Message): - r"""A snapshot of a table at a particular time. A snapshot can be - used as a checkpoint for data restoration or a data source for a - new table. - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to most Cloud - Bigtable customers. This feature might be changed in - backward-incompatible ways and is not recommended for production - use. It is not subject to any SLA or deprecation policy. - - Attributes: - name (str): - The unique name of the snapshot. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. - source_table (google.cloud.bigtable_admin_v2.types.Table): - Output only. The source table at the time the - snapshot was taken. - data_size_bytes (int): - Output only. The size of the data in the - source table at the time the snapshot was taken. - In some cases, this value may be computed - asynchronously via a background process and a - placeholder of 0 will be used in the meantime. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. The time when the snapshot is - created. - delete_time (google.protobuf.timestamp_pb2.Timestamp): - The time when the snapshot will be deleted. - The maximum amount of time a snapshot can stay - active is 365 days. If 'ttl' is not specified, - the default maximum of 365 days will be used. - state (google.cloud.bigtable_admin_v2.types.Snapshot.State): - Output only. The current state of the - snapshot. - description (str): - Description of the snapshot. - """ - - class State(proto.Enum): - r"""Possible states of a snapshot. - - Values: - STATE_NOT_KNOWN (0): - The state of the snapshot could not be - determined. - READY (1): - The snapshot has been successfully created - and can serve all requests. - CREATING (2): - The snapshot is currently being created, and - may be destroyed if the creation process - encounters an error. A snapshot may not be - restored to a table while it is being created. 
- """ - STATE_NOT_KNOWN = 0 - READY = 1 - CREATING = 2 - - name: str = proto.Field( - proto.STRING, - number=1, - ) - source_table: "Table" = proto.Field( - proto.MESSAGE, - number=2, - message="Table", - ) - data_size_bytes: int = proto.Field( - proto.INT64, - number=3, - ) - create_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - delete_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=5, - message=timestamp_pb2.Timestamp, - ) - state: State = proto.Field( - proto.ENUM, - number=6, - enum=State, - ) - description: str = proto.Field( - proto.STRING, - number=7, - ) - - -class Backup(proto.Message): - r"""A backup of a Cloud Bigtable table. - - Attributes: - name (str): - A globally unique identifier for the backup which cannot be - changed. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/ backups/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` - The final segment of the name must be between 1 and 50 - characters in length. - - The backup is stored in the cluster identified by the prefix - of the backup name of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - source_table (str): - Required. Immutable. Name of the table from which this - backup was created. This needs to be in the same instance as - the backup. Values are of the form - ``projects/{project}/instances/{instance}/tables/{source_table}``. - source_backup (str): - Output only. Name of the backup from which - this backup was copied. If a backup is not - created by copying a backup, this field will be - empty. Values are of the form: - - projects//instances//clusters//backups/ - expire_time (google.protobuf.timestamp_pb2.Timestamp): - Required. The expiration time of the backup. When creating a - backup or updating its ``expire_time``, the value must be - greater than the backup creation time by: - - - At least 6 hours - - At most 90 days - - Once the ``expire_time`` has passed, Cloud Bigtable will - delete the backup. - start_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. ``start_time`` is the time that the backup was - started (i.e. approximately the time the - [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup] - request is received). The row data in this backup will be no - older than this timestamp. - end_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. ``end_time`` is the time that the backup was - finished. The row data in the backup will be no newer than - this timestamp. - size_bytes (int): - Output only. Size of the backup in bytes. - state (google.cloud.bigtable_admin_v2.types.Backup.State): - Output only. The current state of the backup. - encryption_info (google.cloud.bigtable_admin_v2.types.EncryptionInfo): - Output only. The encryption information for - the backup. - backup_type (google.cloud.bigtable_admin_v2.types.Backup.BackupType): - Indicates the backup type of the backup. - hot_to_standard_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which the hot backup will be converted to a - standard backup. Once the ``hot_to_standard_time`` has - passed, Cloud Bigtable will convert the hot backup to a - standard backup. This value must be greater than the backup - creation time by: - - - At least 24 hours - - This field only applies for hot backups. When creating or - updating a standard backup, attempting to set this field - will fail the request. 
- """ - - class State(proto.Enum): - r"""Indicates the current state of the backup. - - Values: - STATE_UNSPECIFIED (0): - Not specified. - CREATING (1): - The pending backup is still being created. Operations on the - backup may fail with ``FAILED_PRECONDITION`` in this state. - READY (2): - The backup is complete and ready for use. - """ - STATE_UNSPECIFIED = 0 - CREATING = 1 - READY = 2 - - class BackupType(proto.Enum): - r"""The type of the backup. - - Values: - BACKUP_TYPE_UNSPECIFIED (0): - Not specified. - STANDARD (1): - The default type for Cloud Bigtable managed - backups. Supported for backups created in both - HDD and SSD instances. Requires optimization - when restored to a table in an SSD instance. - HOT (2): - A backup type with faster restore to SSD - performance. Only supported for backups created - in SSD instances. A new SSD table restored from - a hot backup reaches production performance more - quickly than a standard backup. - """ - BACKUP_TYPE_UNSPECIFIED = 0 - STANDARD = 1 - HOT = 2 - - name: str = proto.Field( - proto.STRING, - number=1, - ) - source_table: str = proto.Field( - proto.STRING, - number=2, - ) - source_backup: str = proto.Field( - proto.STRING, - number=10, - ) - expire_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - start_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - end_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=5, - message=timestamp_pb2.Timestamp, - ) - size_bytes: int = proto.Field( - proto.INT64, - number=6, - ) - state: State = proto.Field( - proto.ENUM, - number=7, - enum=State, - ) - encryption_info: "EncryptionInfo" = proto.Field( - proto.MESSAGE, - number=9, - message="EncryptionInfo", - ) - backup_type: BackupType = proto.Field( - proto.ENUM, - number=11, - enum=BackupType, - ) - hot_to_standard_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=12, - message=timestamp_pb2.Timestamp, - ) - - -class BackupInfo(proto.Message): - r"""Information about a backup. - - Attributes: - backup (str): - Output only. Name of the backup. - start_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. The time that the backup was - started. Row data in the backup will be no older - than this timestamp. - end_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. This time that the backup was - finished. Row data in the backup will be no - newer than this timestamp. - source_table (str): - Output only. Name of the table the backup was - created from. - source_backup (str): - Output only. Name of the backup from which - this backup was copied. If a backup is not - created by copying a backup, this field will be - empty. Values are of the form: - - projects//instances//clusters//backups/ - """ - - backup: str = proto.Field( - proto.STRING, - number=1, - ) - start_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - end_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - source_table: str = proto.Field( - proto.STRING, - number=4, - ) - source_backup: str = proto.Field( - proto.STRING, - number=10, - ) - - -class ProtoSchema(proto.Message): - r"""Represents a protobuf schema. - - Attributes: - proto_descriptors (bytes): - Required. 
Contains a protobuf-serialized - `google.protobuf.FileDescriptorSet `__, - which could include multiple proto files. To generate it, - `install `__ and - run ``protoc`` with ``--include_imports`` and - ``--descriptor_set_out``. For example, to generate for - moon/shot/app.proto, run - - :: - - $protoc --proto_path=/app_path --proto_path=/lib_path \ - --include_imports \ - --descriptor_set_out=descriptors.pb \ - moon/shot/app.proto - - For more details, see protobuffer `self - description `__. - """ - - proto_descriptors: bytes = proto.Field( - proto.BYTES, - number=2, - ) - - -class SchemaBundle(proto.Message): - r"""A named collection of related schemas. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - name (str): - Identifier. The unique name identifying this schema bundle. - Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}`` - proto_schema (google.cloud.bigtable_admin_v2.types.ProtoSchema): - Schema for Protobufs. - - This field is a member of `oneof`_ ``type``. - etag (str): - Optional. The etag for this schema bundle. - This may be sent on update and delete requests - to ensure the client has an up-to-date value - before proceeding. The server returns an ABORTED - error on a mismatched etag. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - proto_schema: "ProtoSchema" = proto.Field( - proto.MESSAGE, - number=2, - oneof="type", - message="ProtoSchema", - ) - etag: str = proto.Field( - proto.STRING, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/bigtable_admin_v2/types/types.py b/google/cloud/bigtable_admin_v2/types/types.py deleted file mode 100644 index b6ea5341d..000000000 --- a/google/cloud/bigtable_admin_v2/types/types.py +++ /dev/null @@ -1,842 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - - -__protobuf__ = proto.module( - package="google.bigtable.admin.v2", - manifest={ - "Type", - }, -) - - -class Type(proto.Message): - r"""``Type`` represents the type of data that is written to, read from, - or stored in Bigtable. It is heavily based on the GoogleSQL standard - to help maintain familiarity and consistency across products and - features. - - For compatibility with Bigtable's existing untyped APIs, each - ``Type`` includes an ``Encoding`` which describes how to convert to - or from the underlying data. - - Each encoding can operate in one of two modes: - - - Sorted: In this mode, Bigtable guarantees that - ``Encode(X) <= Encode(Y)`` if and only if ``X <= Y``. This is - useful anywhere sort order is important, for example when - encoding keys. - - Distinct: In this mode, Bigtable guarantees that if ``X != Y`` - then ``Encode(X) != Encode(Y)``. However, the converse is not - guaranteed. 
For example, both "{'foo': '1', 'bar': '2'}" and - "{'bar': '2', 'foo': '1'}" are valid encodings of the same JSON - value. - - The API clearly documents which mode is used wherever an encoding - can be configured. Each encoding also documents which values are - supported in which modes. For example, when encoding INT64 as a - numeric STRING, negative numbers cannot be encoded in sorted mode. - This is because ``INT64(1) > INT64(-1)``, but - ``STRING("-00001") > STRING("00001")``. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - bytes_type (google.cloud.bigtable_admin_v2.types.Type.Bytes): - Bytes - - This field is a member of `oneof`_ ``kind``. - string_type (google.cloud.bigtable_admin_v2.types.Type.String): - String - - This field is a member of `oneof`_ ``kind``. - int64_type (google.cloud.bigtable_admin_v2.types.Type.Int64): - Int64 - - This field is a member of `oneof`_ ``kind``. - float32_type (google.cloud.bigtable_admin_v2.types.Type.Float32): - Float32 - - This field is a member of `oneof`_ ``kind``. - float64_type (google.cloud.bigtable_admin_v2.types.Type.Float64): - Float64 - - This field is a member of `oneof`_ ``kind``. - bool_type (google.cloud.bigtable_admin_v2.types.Type.Bool): - Bool - - This field is a member of `oneof`_ ``kind``. - timestamp_type (google.cloud.bigtable_admin_v2.types.Type.Timestamp): - Timestamp - - This field is a member of `oneof`_ ``kind``. - date_type (google.cloud.bigtable_admin_v2.types.Type.Date): - Date - - This field is a member of `oneof`_ ``kind``. - aggregate_type (google.cloud.bigtable_admin_v2.types.Type.Aggregate): - Aggregate - - This field is a member of `oneof`_ ``kind``. - struct_type (google.cloud.bigtable_admin_v2.types.Type.Struct): - Struct - - This field is a member of `oneof`_ ``kind``. - array_type (google.cloud.bigtable_admin_v2.types.Type.Array): - Array - - This field is a member of `oneof`_ ``kind``. - map_type (google.cloud.bigtable_admin_v2.types.Type.Map): - Map - - This field is a member of `oneof`_ ``kind``. - proto_type (google.cloud.bigtable_admin_v2.types.Type.Proto): - Proto - - This field is a member of `oneof`_ ``kind``. - enum_type (google.cloud.bigtable_admin_v2.types.Type.Enum): - Enum - - This field is a member of `oneof`_ ``kind``. - """ - - class Bytes(proto.Message): - r"""Bytes Values of type ``Bytes`` are stored in ``Value.bytes_value``. - - Attributes: - encoding (google.cloud.bigtable_admin_v2.types.Type.Bytes.Encoding): - The encoding to use when converting to or - from lower level types. - """ - - class Encoding(proto.Message): - r"""Rules used to convert to or from lower level types. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - raw (google.cloud.bigtable_admin_v2.types.Type.Bytes.Encoding.Raw): - Use ``Raw`` encoding. - - This field is a member of `oneof`_ ``encoding``. - """ - - class Raw(proto.Message): - r"""Leaves the value as-is. - - Sorted mode: all values are supported. - - Distinct mode: all values are supported. 
- - """ - - raw: "Type.Bytes.Encoding.Raw" = proto.Field( - proto.MESSAGE, - number=1, - oneof="encoding", - message="Type.Bytes.Encoding.Raw", - ) - - encoding: "Type.Bytes.Encoding" = proto.Field( - proto.MESSAGE, - number=1, - message="Type.Bytes.Encoding", - ) - - class String(proto.Message): - r"""String Values of type ``String`` are stored in - ``Value.string_value``. - - Attributes: - encoding (google.cloud.bigtable_admin_v2.types.Type.String.Encoding): - The encoding to use when converting to or - from lower level types. - """ - - class Encoding(proto.Message): - r"""Rules used to convert to or from lower level types. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - utf8_raw (google.cloud.bigtable_admin_v2.types.Type.String.Encoding.Utf8Raw): - Deprecated: if set, converts to an empty ``utf8_bytes``. - - This field is a member of `oneof`_ ``encoding``. - utf8_bytes (google.cloud.bigtable_admin_v2.types.Type.String.Encoding.Utf8Bytes): - Use ``Utf8Bytes`` encoding. - - This field is a member of `oneof`_ ``encoding``. - """ - - class Utf8Raw(proto.Message): - r"""Deprecated: prefer the equivalent ``Utf8Bytes``.""" - - class Utf8Bytes(proto.Message): - r"""UTF-8 encoding. - - Sorted mode: - - - All values are supported. - - Code point order is preserved. - - Distinct mode: all values are supported. - - Compatible with: - - - BigQuery ``TEXT`` encoding - - HBase ``Bytes.toBytes`` - - Java ``String#getBytes(StandardCharsets.UTF_8)`` - - """ - - utf8_raw: "Type.String.Encoding.Utf8Raw" = proto.Field( - proto.MESSAGE, - number=1, - oneof="encoding", - message="Type.String.Encoding.Utf8Raw", - ) - utf8_bytes: "Type.String.Encoding.Utf8Bytes" = proto.Field( - proto.MESSAGE, - number=2, - oneof="encoding", - message="Type.String.Encoding.Utf8Bytes", - ) - - encoding: "Type.String.Encoding" = proto.Field( - proto.MESSAGE, - number=1, - message="Type.String.Encoding", - ) - - class Int64(proto.Message): - r"""Int64 Values of type ``Int64`` are stored in ``Value.int_value``. - - Attributes: - encoding (google.cloud.bigtable_admin_v2.types.Type.Int64.Encoding): - The encoding to use when converting to or - from lower level types. - """ - - class Encoding(proto.Message): - r"""Rules used to convert to or from lower level types. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - big_endian_bytes (google.cloud.bigtable_admin_v2.types.Type.Int64.Encoding.BigEndianBytes): - Use ``BigEndianBytes`` encoding. - - This field is a member of `oneof`_ ``encoding``. - ordered_code_bytes (google.cloud.bigtable_admin_v2.types.Type.Int64.Encoding.OrderedCodeBytes): - Use ``OrderedCodeBytes`` encoding. - - This field is a member of `oneof`_ ``encoding``. - """ - - class BigEndianBytes(proto.Message): - r"""Encodes the value as an 8-byte big-endian two's complement value. - - Sorted mode: non-negative values are supported. - - Distinct mode: all values are supported. 
- - Compatible with: - - - BigQuery ``BINARY`` encoding - - HBase ``Bytes.toBytes`` - - Java ``ByteBuffer.putLong()`` with ``ByteOrder.BIG_ENDIAN`` - - Attributes: - bytes_type (google.cloud.bigtable_admin_v2.types.Type.Bytes): - Deprecated: ignored if set. - """ - - bytes_type: "Type.Bytes" = proto.Field( - proto.MESSAGE, - number=1, - message="Type.Bytes", - ) - - class OrderedCodeBytes(proto.Message): - r"""Encodes the value in a variable length binary format of up to - 10 bytes. Values that are closer to zero use fewer bytes. - - Sorted mode: all values are supported. - - Distinct mode: all values are supported. - - """ - - big_endian_bytes: "Type.Int64.Encoding.BigEndianBytes" = proto.Field( - proto.MESSAGE, - number=1, - oneof="encoding", - message="Type.Int64.Encoding.BigEndianBytes", - ) - ordered_code_bytes: "Type.Int64.Encoding.OrderedCodeBytes" = proto.Field( - proto.MESSAGE, - number=2, - oneof="encoding", - message="Type.Int64.Encoding.OrderedCodeBytes", - ) - - encoding: "Type.Int64.Encoding" = proto.Field( - proto.MESSAGE, - number=1, - message="Type.Int64.Encoding", - ) - - class Bool(proto.Message): - r"""bool Values of type ``Bool`` are stored in ``Value.bool_value``.""" - - class Float32(proto.Message): - r"""Float32 Values of type ``Float32`` are stored in - ``Value.float_value``. - - """ - - class Float64(proto.Message): - r"""Float64 Values of type ``Float64`` are stored in - ``Value.float_value``. - - """ - - class Timestamp(proto.Message): - r"""Timestamp Values of type ``Timestamp`` are stored in - ``Value.timestamp_value``. - - Attributes: - encoding (google.cloud.bigtable_admin_v2.types.Type.Timestamp.Encoding): - The encoding to use when converting to or - from lower level types. - """ - - class Encoding(proto.Message): - r"""Rules used to convert to or from lower level types. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - unix_micros_int64 (google.cloud.bigtable_admin_v2.types.Type.Int64.Encoding): - Encodes the number of microseconds since the Unix epoch - using the given ``Int64`` encoding. Values must be - microsecond-aligned. - - Compatible with: - - - Java ``Instant.truncatedTo()`` with ``ChronoUnit.MICROS`` - - This field is a member of `oneof`_ ``encoding``. - """ - - unix_micros_int64: "Type.Int64.Encoding" = proto.Field( - proto.MESSAGE, - number=1, - oneof="encoding", - message="Type.Int64.Encoding", - ) - - encoding: "Type.Timestamp.Encoding" = proto.Field( - proto.MESSAGE, - number=1, - message="Type.Timestamp.Encoding", - ) - - class Date(proto.Message): - r"""Date Values of type ``Date`` are stored in ``Value.date_value``.""" - - class Struct(proto.Message): - r"""A structured data value, consisting of fields which map to - dynamically typed values. Values of type ``Struct`` are stored in - ``Value.array_value`` where entries are in the same order and number - as ``field_types``. - - Attributes: - fields (MutableSequence[google.cloud.bigtable_admin_v2.types.Type.Struct.Field]): - The names and types of the fields in this - struct. - encoding (google.cloud.bigtable_admin_v2.types.Type.Struct.Encoding): - The encoding to use when converting to or - from lower level types. - """ - - class Field(proto.Message): - r"""A struct field and its type. - - Attributes: - field_name (str): - The field name (optional). Fields without a ``field_name`` - are considered anonymous and cannot be referenced by name. 
- type_ (google.cloud.bigtable_admin_v2.types.Type): - The type of values in this field. - """ - - field_name: str = proto.Field( - proto.STRING, - number=1, - ) - type_: "Type" = proto.Field( - proto.MESSAGE, - number=2, - message="Type", - ) - - class Encoding(proto.Message): - r"""Rules used to convert to or from lower level types. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - singleton (google.cloud.bigtable_admin_v2.types.Type.Struct.Encoding.Singleton): - Use ``Singleton`` encoding. - - This field is a member of `oneof`_ ``encoding``. - delimited_bytes (google.cloud.bigtable_admin_v2.types.Type.Struct.Encoding.DelimitedBytes): - Use ``DelimitedBytes`` encoding. - - This field is a member of `oneof`_ ``encoding``. - ordered_code_bytes (google.cloud.bigtable_admin_v2.types.Type.Struct.Encoding.OrderedCodeBytes): - User ``OrderedCodeBytes`` encoding. - - This field is a member of `oneof`_ ``encoding``. - """ - - class Singleton(proto.Message): - r"""Uses the encoding of ``fields[0].type`` as-is. Only valid if - ``fields.size == 1``. - - """ - - class DelimitedBytes(proto.Message): - r"""Fields are encoded independently and concatenated with a - configurable ``delimiter`` in between. - - A struct with no fields defined is encoded as a single - ``delimiter``. - - Sorted mode: - - - Fields are encoded in sorted mode. - - Encoded field values must not contain any bytes <= - ``delimiter[0]`` - - Element-wise order is preserved: ``A < B`` if ``A[0] < B[0]``, or - if ``A[0] == B[0] && A[1] < B[1]``, etc. Strict prefixes sort - first. - - Distinct mode: - - - Fields are encoded in distinct mode. - - Encoded field values must not contain ``delimiter[0]``. - - Attributes: - delimiter (bytes): - Byte sequence used to delimit concatenated - fields. The delimiter must contain at least 1 - character and at most 50 characters. - """ - - delimiter: bytes = proto.Field( - proto.BYTES, - number=1, - ) - - class OrderedCodeBytes(proto.Message): - r"""Fields are encoded independently and concatenated with the fixed - byte pair {0x00, 0x01} in between. - - Any null (0x00) byte in an encoded field is replaced by the fixed - byte pair {0x00, 0xFF}. - - Fields that encode to the empty string "" have special handling: - - - If *every* field encodes to "", or if the STRUCT has no fields - defined, then the STRUCT is encoded as the fixed byte pair {0x00, - 0x00}. - - Otherwise, the STRUCT only encodes until the last non-empty - field, omitting any trailing empty fields. Any empty fields that - aren't omitted are replaced with the fixed byte pair {0x00, - 0x00}. - - Examples: - - - STRUCT() -> "\00\00" - - STRUCT("") -> "\00\00" - - STRUCT("", "") -> "\00\00" - - STRUCT("", "B") -> "\00\00" + "\00\01" + "B" - - STRUCT("A", "") -> "A" - - STRUCT("", "B", "") -> "\00\00" + "\00\01" + "B" - - STRUCT("A", "", "C") -> "A" + "\00\01" + "\00\00" + "\00\01" + - "C" - - Since null bytes are always escaped, this encoding can cause size - blowup for encodings like ``Int64.BigEndianBytes`` that are likely - to produce many such bytes. - - Sorted mode: - - - Fields are encoded in sorted mode. 
- - All values supported by the field encodings are allowed - - Element-wise order is preserved: ``A < B`` if ``A[0] < B[0]``, or - if ``A[0] == B[0] && A[1] < B[1]``, etc. Strict prefixes sort - first. - - Distinct mode: - - - Fields are encoded in distinct mode. - - All values supported by the field encodings are allowed. - - """ - - singleton: "Type.Struct.Encoding.Singleton" = proto.Field( - proto.MESSAGE, - number=1, - oneof="encoding", - message="Type.Struct.Encoding.Singleton", - ) - delimited_bytes: "Type.Struct.Encoding.DelimitedBytes" = proto.Field( - proto.MESSAGE, - number=2, - oneof="encoding", - message="Type.Struct.Encoding.DelimitedBytes", - ) - ordered_code_bytes: "Type.Struct.Encoding.OrderedCodeBytes" = proto.Field( - proto.MESSAGE, - number=3, - oneof="encoding", - message="Type.Struct.Encoding.OrderedCodeBytes", - ) - - fields: MutableSequence["Type.Struct.Field"] = proto.RepeatedField( - proto.MESSAGE, - number=1, - message="Type.Struct.Field", - ) - encoding: "Type.Struct.Encoding" = proto.Field( - proto.MESSAGE, - number=2, - message="Type.Struct.Encoding", - ) - - class Proto(proto.Message): - r"""A protobuf message type. Values of type ``Proto`` are stored in - ``Value.bytes_value``. - - Attributes: - schema_bundle_id (str): - The ID of the schema bundle that this proto - is defined in. - message_name (str): - The fully qualified name of the protobuf - message, including package. In the format of - "foo.bar.Message". - """ - - schema_bundle_id: str = proto.Field( - proto.STRING, - number=1, - ) - message_name: str = proto.Field( - proto.STRING, - number=2, - ) - - class Enum(proto.Message): - r"""A protobuf enum type. Values of type ``Enum`` are stored in - ``Value.int_value``. - - Attributes: - schema_bundle_id (str): - The ID of the schema bundle that this enum is - defined in. - enum_name (str): - The fully qualified name of the protobuf enum - message, including package. In the format of - "foo.bar.EnumMessage". - """ - - schema_bundle_id: str = proto.Field( - proto.STRING, - number=1, - ) - enum_name: str = proto.Field( - proto.STRING, - number=2, - ) - - class Array(proto.Message): - r"""An ordered list of elements of a given type. Values of type - ``Array`` are stored in ``Value.array_value``. - - Attributes: - element_type (google.cloud.bigtable_admin_v2.types.Type): - The type of the elements in the array. This must not be - ``Array``. - """ - - element_type: "Type" = proto.Field( - proto.MESSAGE, - number=1, - message="Type", - ) - - class Map(proto.Message): - r"""A mapping of keys to values of a given type. Values of type ``Map`` - are stored in a ``Value.array_value`` where each entry is another - ``Value.array_value`` with two elements (the key and the value, in - that order). Normally encoded Map values won't have repeated keys, - however, clients are expected to handle the case in which they do. - If the same key appears multiple times, the *last* value takes - precedence. - - Attributes: - key_type (google.cloud.bigtable_admin_v2.types.Type): - The type of a map key. Only ``Bytes``, ``String``, and - ``Int64`` are allowed as key types. - value_type (google.cloud.bigtable_admin_v2.types.Type): - The type of the values in a map. - """ - - key_type: "Type" = proto.Field( - proto.MESSAGE, - number=1, - message="Type", - ) - value_type: "Type" = proto.Field( - proto.MESSAGE, - number=2, - message="Type", - ) - - class Aggregate(proto.Message): - r"""A value that combines incremental updates into a summarized value. 
- - Data is never directly written or read using type ``Aggregate``. - Writes will provide either the ``input_type`` or ``state_type``, and - reads will always return the ``state_type`` . - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - input_type (google.cloud.bigtable_admin_v2.types.Type): - Type of the inputs that are accumulated by this - ``Aggregate``, which must specify a full encoding. Use - ``AddInput`` mutations to accumulate new inputs. - state_type (google.cloud.bigtable_admin_v2.types.Type): - Output only. Type that holds the internal accumulator state - for the ``Aggregate``. This is a function of the - ``input_type`` and ``aggregator`` chosen, and will always - specify a full encoding. - sum (google.cloud.bigtable_admin_v2.types.Type.Aggregate.Sum): - Sum aggregator. - - This field is a member of `oneof`_ ``aggregator``. - hllpp_unique_count (google.cloud.bigtable_admin_v2.types.Type.Aggregate.HyperLogLogPlusPlusUniqueCount): - HyperLogLogPlusPlusUniqueCount aggregator. - - This field is a member of `oneof`_ ``aggregator``. - max_ (google.cloud.bigtable_admin_v2.types.Type.Aggregate.Max): - Max aggregator. - - This field is a member of `oneof`_ ``aggregator``. - min_ (google.cloud.bigtable_admin_v2.types.Type.Aggregate.Min): - Min aggregator. - - This field is a member of `oneof`_ ``aggregator``. - """ - - class Sum(proto.Message): - r"""Computes the sum of the input values. Allowed input: ``Int64`` - State: same as input - - """ - - class Max(proto.Message): - r"""Computes the max of the input values. Allowed input: ``Int64`` - State: same as input - - """ - - class Min(proto.Message): - r"""Computes the min of the input values. Allowed input: ``Int64`` - State: same as input - - """ - - class HyperLogLogPlusPlusUniqueCount(proto.Message): - r"""Computes an approximate unique count over the input values. When - using raw data as input, be careful to use a consistent encoding. - Otherwise the same value encoded differently could count more than - once, or two distinct values could count as identical. 
Input: Any, - or omit for Raw State: TBD Special state conversions: ``Int64`` (the - unique count estimate) - - """ - - input_type: "Type" = proto.Field( - proto.MESSAGE, - number=1, - message="Type", - ) - state_type: "Type" = proto.Field( - proto.MESSAGE, - number=2, - message="Type", - ) - sum: "Type.Aggregate.Sum" = proto.Field( - proto.MESSAGE, - number=4, - oneof="aggregator", - message="Type.Aggregate.Sum", - ) - hllpp_unique_count: "Type.Aggregate.HyperLogLogPlusPlusUniqueCount" = ( - proto.Field( - proto.MESSAGE, - number=5, - oneof="aggregator", - message="Type.Aggregate.HyperLogLogPlusPlusUniqueCount", - ) - ) - max_: "Type.Aggregate.Max" = proto.Field( - proto.MESSAGE, - number=6, - oneof="aggregator", - message="Type.Aggregate.Max", - ) - min_: "Type.Aggregate.Min" = proto.Field( - proto.MESSAGE, - number=7, - oneof="aggregator", - message="Type.Aggregate.Min", - ) - - bytes_type: Bytes = proto.Field( - proto.MESSAGE, - number=1, - oneof="kind", - message=Bytes, - ) - string_type: String = proto.Field( - proto.MESSAGE, - number=2, - oneof="kind", - message=String, - ) - int64_type: Int64 = proto.Field( - proto.MESSAGE, - number=5, - oneof="kind", - message=Int64, - ) - float32_type: Float32 = proto.Field( - proto.MESSAGE, - number=12, - oneof="kind", - message=Float32, - ) - float64_type: Float64 = proto.Field( - proto.MESSAGE, - number=9, - oneof="kind", - message=Float64, - ) - bool_type: Bool = proto.Field( - proto.MESSAGE, - number=8, - oneof="kind", - message=Bool, - ) - timestamp_type: Timestamp = proto.Field( - proto.MESSAGE, - number=10, - oneof="kind", - message=Timestamp, - ) - date_type: Date = proto.Field( - proto.MESSAGE, - number=11, - oneof="kind", - message=Date, - ) - aggregate_type: Aggregate = proto.Field( - proto.MESSAGE, - number=6, - oneof="kind", - message=Aggregate, - ) - struct_type: Struct = proto.Field( - proto.MESSAGE, - number=7, - oneof="kind", - message=Struct, - ) - array_type: Array = proto.Field( - proto.MESSAGE, - number=3, - oneof="kind", - message=Array, - ) - map_type: Map = proto.Field( - proto.MESSAGE, - number=4, - oneof="kind", - message=Map, - ) - proto_type: Proto = proto.Field( - proto.MESSAGE, - number=13, - oneof="kind", - message=Proto, - ) - enum_type: Enum = proto.Field( - proto.MESSAGE, - number=14, - oneof="kind", - message=Enum, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_app_profile_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_app_profile_async.py deleted file mode 100644 index 82dafab44..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_app_profile_async.py +++ /dev/null @@ -1,57 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! 
-# -# Snippet for CreateAppProfile -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateAppProfile_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -async def sample_create_app_profile(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - app_profile = bigtable_admin_v2.AppProfile() - app_profile.priority = "PRIORITY_HIGH" - - request = bigtable_admin_v2.CreateAppProfileRequest( - parent="parent_value", - app_profile_id="app_profile_id_value", - app_profile=app_profile, - ) - - # Make the request - response = await client.create_app_profile(request=request) - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateAppProfile_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_app_profile_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_app_profile_sync.py deleted file mode 100644 index 82ff382b7..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_app_profile_sync.py +++ /dev/null @@ -1,57 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for CreateAppProfile -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateAppProfile_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -def sample_create_app_profile(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - app_profile = bigtable_admin_v2.AppProfile() - app_profile.priority = "PRIORITY_HIGH" - - request = bigtable_admin_v2.CreateAppProfileRequest( - parent="parent_value", - app_profile_id="app_profile_id_value", - app_profile=app_profile, - ) - - # Make the request - response = client.create_app_profile(request=request) - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateAppProfile_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_cluster_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_cluster_async.py deleted file mode 100644 index fb9fac60f..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_cluster_async.py +++ /dev/null @@ -1,57 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for CreateCluster -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateCluster_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -async def sample_create_cluster(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.CreateClusterRequest( - parent="parent_value", - cluster_id="cluster_id_value", - ) - - # Make the request - operation = client.create_cluster(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateCluster_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_cluster_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_cluster_sync.py deleted file mode 100644 index d8d5f9958..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_cluster_sync.py +++ /dev/null @@ -1,57 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for CreateCluster -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateCluster_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -def sample_create_cluster(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.CreateClusterRequest( - parent="parent_value", - cluster_id="cluster_id_value", - ) - - # Make the request - operation = client.create_cluster(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateCluster_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_instance_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_instance_async.py deleted file mode 100644 index dbde6c4bc..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_instance_async.py +++ /dev/null @@ -1,61 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for CreateInstance -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateInstance_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -async def sample_create_instance(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - instance = bigtable_admin_v2.Instance() - instance.display_name = "display_name_value" - - request = bigtable_admin_v2.CreateInstanceRequest( - parent="parent_value", - instance_id="instance_id_value", - instance=instance, - ) - - # Make the request - operation = client.create_instance(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateInstance_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_instance_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_instance_sync.py deleted file mode 100644 index 83ec90e53..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_instance_sync.py +++ /dev/null @@ -1,61 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for CreateInstance -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateInstance_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -def sample_create_instance(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - instance = bigtable_admin_v2.Instance() - instance.display_name = "display_name_value" - - request = bigtable_admin_v2.CreateInstanceRequest( - parent="parent_value", - instance_id="instance_id_value", - instance=instance, - ) - - # Make the request - operation = client.create_instance(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateInstance_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_logical_view_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_logical_view_async.py deleted file mode 100644 index 6dfb1d612..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_logical_view_async.py +++ /dev/null @@ -1,61 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for CreateLogicalView -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateLogicalView_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -async def sample_create_logical_view(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - logical_view = bigtable_admin_v2.LogicalView() - logical_view.query = "query_value" - - request = bigtable_admin_v2.CreateLogicalViewRequest( - parent="parent_value", - logical_view_id="logical_view_id_value", - logical_view=logical_view, - ) - - # Make the request - operation = client.create_logical_view(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateLogicalView_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_logical_view_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_logical_view_sync.py deleted file mode 100644 index f0214acbf..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_logical_view_sync.py +++ /dev/null @@ -1,61 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for CreateLogicalView -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateLogicalView_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import bigtable_admin_v2
-
-
-def sample_create_logical_view():
-    # Create a client
-    client = bigtable_admin_v2.BigtableInstanceAdminClient()
-
-    # Initialize request argument(s)
-    logical_view = bigtable_admin_v2.LogicalView()
-    logical_view.query = "query_value"
-
-    request = bigtable_admin_v2.CreateLogicalViewRequest(
-        parent="parent_value",
-        logical_view_id="logical_view_id_value",
-        logical_view=logical_view,
-    )
-
-    # Make the request
-    operation = client.create_logical_view(request=request)
-
-    print("Waiting for operation to complete...")
-
-    response = operation.result()
-
-    # Handle the response
-    print(response)
-
-# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateLogicalView_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_materialized_view_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_materialized_view_async.py
deleted file mode 100644
index 30481d2f3..000000000
--- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_materialized_view_async.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2025 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for CreateMaterializedView
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-# python3 -m pip install google-cloud-bigtable-admin
-
-
-# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateMaterializedView_async]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import bigtable_admin_v2
-
-
-async def sample_create_materialized_view():
-    # Create a client
-    client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
-
-    # Initialize request argument(s)
-    materialized_view = bigtable_admin_v2.MaterializedView()
-    materialized_view.query = "query_value"
-
-    request = bigtable_admin_v2.CreateMaterializedViewRequest(
-        parent="parent_value",
-        materialized_view_id="materialized_view_id_value",
-        materialized_view=materialized_view,
-    )
-
-    # Make the request
-    operation = client.create_materialized_view(request=request)
-
-    print("Waiting for operation to complete...")
-
-    response = (await operation).result()
-
-    # Handle the response
-    print(response)
-
-# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateMaterializedView_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_materialized_view_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_materialized_view_sync.py
deleted file mode 100644
index 45116fb49..000000000
--- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_materialized_view_sync.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2025 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for CreateMaterializedView
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-# python3 -m pip install google-cloud-bigtable-admin
-
-
-# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateMaterializedView_sync]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import bigtable_admin_v2
-
-
-def sample_create_materialized_view():
-    # Create a client
-    client = bigtable_admin_v2.BigtableInstanceAdminClient()
-
-    # Initialize request argument(s)
-    materialized_view = bigtable_admin_v2.MaterializedView()
-    materialized_view.query = "query_value"
-
-    request = bigtable_admin_v2.CreateMaterializedViewRequest(
-        parent="parent_value",
-        materialized_view_id="materialized_view_id_value",
-        materialized_view=materialized_view,
-    )
-
-    # Make the request
-    operation = client.create_materialized_view(request=request)
-
-    print("Waiting for operation to complete...")
-
-    response = operation.result()
-
-    # Handle the response
-    print(response)
-
-# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateMaterializedView_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_app_profile_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_app_profile_async.py
deleted file mode 100644
index 76d272519..000000000
--- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_app_profile_async.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2025 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for DeleteAppProfile
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-# python3 -m pip install google-cloud-bigtable-admin
-
-
-# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteAppProfile_async]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import bigtable_admin_v2
-
-
-async def sample_delete_app_profile():
-    # Create a client
-    client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
-
-    # Initialize request argument(s)
-    request = bigtable_admin_v2.DeleteAppProfileRequest(
-        name="name_value",
-        ignore_warnings=True,
-    )
-
-    # Make the request
-    await client.delete_app_profile(request=request)
-
-
-# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteAppProfile_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_app_profile_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_app_profile_sync.py
deleted file mode 100644
index 47f552fb8..000000000
--- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_app_profile_sync.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2025 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for DeleteAppProfile
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-# python3 -m pip install google-cloud-bigtable-admin
-
-
-# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteAppProfile_sync]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import bigtable_admin_v2
-
-
-def sample_delete_app_profile():
-    # Create a client
-    client = bigtable_admin_v2.BigtableInstanceAdminClient()
-
-    # Initialize request argument(s)
-    request = bigtable_admin_v2.DeleteAppProfileRequest(
-        name="name_value",
-        ignore_warnings=True,
-    )
-
-    # Make the request
-    client.delete_app_profile(request=request)
-
-
-# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteAppProfile_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_cluster_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_cluster_async.py
deleted file mode 100644
index 6f97b6a5e..000000000
--- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_cluster_async.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2025 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for DeleteCluster -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteCluster_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -async def sample_delete_cluster(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.DeleteClusterRequest( - name="name_value", - ) - - # Make the request - await client.delete_cluster(request=request) - - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteCluster_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_cluster_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_cluster_sync.py deleted file mode 100644 index d058a08e6..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_cluster_sync.py +++ /dev/null @@ -1,50 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for DeleteCluster -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteCluster_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -def sample_delete_cluster(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.DeleteClusterRequest( - name="name_value", - ) - - # Make the request - client.delete_cluster(request=request) - - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteCluster_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_instance_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_instance_async.py deleted file mode 100644 index ecf5583be..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_instance_async.py +++ /dev/null @@ -1,50 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for DeleteInstance -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteInstance_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -async def sample_delete_instance(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.DeleteInstanceRequest( - name="name_value", - ) - - # Make the request - await client.delete_instance(request=request) - - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteInstance_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_instance_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_instance_sync.py deleted file mode 100644 index e8f568486..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_instance_sync.py +++ /dev/null @@ -1,50 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for DeleteInstance -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteInstance_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -def sample_delete_instance(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.DeleteInstanceRequest( - name="name_value", - ) - - # Make the request - client.delete_instance(request=request) - - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteInstance_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_logical_view_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_logical_view_async.py deleted file mode 100644 index 93f9d8ce8..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_logical_view_async.py +++ /dev/null @@ -1,50 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for DeleteLogicalView -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteLogicalView_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -async def sample_delete_logical_view(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.DeleteLogicalViewRequest( - name="name_value", - ) - - # Make the request - await client.delete_logical_view(request=request) - - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteLogicalView_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_logical_view_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_logical_view_sync.py deleted file mode 100644 index fdece2bbc..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_logical_view_sync.py +++ /dev/null @@ -1,50 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for DeleteLogicalView -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteLogicalView_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -def sample_delete_logical_view(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.DeleteLogicalViewRequest( - name="name_value", - ) - - # Make the request - client.delete_logical_view(request=request) - - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteLogicalView_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_materialized_view_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_materialized_view_async.py deleted file mode 100644 index 22a9f0ad4..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_materialized_view_async.py +++ /dev/null @@ -1,50 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for DeleteMaterializedView -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteMaterializedView_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -async def sample_delete_materialized_view(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.DeleteMaterializedViewRequest( - name="name_value", - ) - - # Make the request - await client.delete_materialized_view(request=request) - - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteMaterializedView_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_materialized_view_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_materialized_view_sync.py deleted file mode 100644 index b6cf3a453..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_materialized_view_sync.py +++ /dev/null @@ -1,50 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for DeleteMaterializedView -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteMaterializedView_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -def sample_delete_materialized_view(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.DeleteMaterializedViewRequest( - name="name_value", - ) - - # Make the request - client.delete_materialized_view(request=request) - - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteMaterializedView_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_app_profile_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_app_profile_async.py deleted file mode 100644 index 3a59ca599..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_app_profile_async.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for GetAppProfile -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetAppProfile_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -async def sample_get_app_profile(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.GetAppProfileRequest( - name="name_value", - ) - - # Make the request - response = await client.get_app_profile(request=request) - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetAppProfile_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_app_profile_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_app_profile_sync.py deleted file mode 100644 index 2e54bfcad..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_app_profile_sync.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for GetAppProfile -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetAppProfile_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -def sample_get_app_profile(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.GetAppProfileRequest( - name="name_value", - ) - - # Make the request - response = client.get_app_profile(request=request) - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetAppProfile_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_cluster_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_cluster_async.py deleted file mode 100644 index b4d89a11d..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_cluster_async.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for GetCluster -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetCluster_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -async def sample_get_cluster(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.GetClusterRequest( - name="name_value", - ) - - # Make the request - response = await client.get_cluster(request=request) - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetCluster_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_cluster_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_cluster_sync.py deleted file mode 100644 index 25a80a871..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_cluster_sync.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for GetCluster -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetCluster_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -def sample_get_cluster(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.GetClusterRequest( - name="name_value", - ) - - # Make the request - response = client.get_cluster(request=request) - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetCluster_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_iam_policy_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_iam_policy_async.py deleted file mode 100644 index b2e479c11..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_iam_policy_async.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for GetIamPolicy -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetIamPolicy_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 -from google.iam.v1 import iam_policy_pb2 # type: ignore - - -async def sample_get_iam_policy(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - request = iam_policy_pb2.GetIamPolicyRequest( - resource="resource_value", - ) - - # Make the request - response = await client.get_iam_policy(request=request) - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetIamPolicy_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_iam_policy_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_iam_policy_sync.py deleted file mode 100644 index ffb2a81b0..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_iam_policy_sync.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for GetIamPolicy -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetIamPolicy_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 -from google.iam.v1 import iam_policy_pb2 # type: ignore - - -def sample_get_iam_policy(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - request = iam_policy_pb2.GetIamPolicyRequest( - resource="resource_value", - ) - - # Make the request - response = client.get_iam_policy(request=request) - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetIamPolicy_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_instance_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_instance_async.py deleted file mode 100644 index b76fac83a..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_instance_async.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for GetInstance -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetInstance_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -async def sample_get_instance(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.GetInstanceRequest( - name="name_value", - ) - - # Make the request - response = await client.get_instance(request=request) - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetInstance_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_instance_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_instance_sync.py deleted file mode 100644 index 711ed99a5..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_instance_sync.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for GetInstance -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetInstance_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -def sample_get_instance(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.GetInstanceRequest( - name="name_value", - ) - - # Make the request - response = client.get_instance(request=request) - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetInstance_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_logical_view_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_logical_view_async.py deleted file mode 100644 index 4ce25cdda..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_logical_view_async.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for GetLogicalView -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetLogicalView_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -async def sample_get_logical_view(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.GetLogicalViewRequest( - name="name_value", - ) - - # Make the request - response = await client.get_logical_view(request=request) - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetLogicalView_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_logical_view_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_logical_view_sync.py deleted file mode 100644 index daaf7fa63..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_logical_view_sync.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for GetLogicalView -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetLogicalView_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -def sample_get_logical_view(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.GetLogicalViewRequest( - name="name_value", - ) - - # Make the request - response = client.get_logical_view(request=request) - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetLogicalView_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_materialized_view_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_materialized_view_async.py deleted file mode 100644 index 165fb262c..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_materialized_view_async.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for GetMaterializedView -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetMaterializedView_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -async def sample_get_materialized_view(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.GetMaterializedViewRequest( - name="name_value", - ) - - # Make the request - response = await client.get_materialized_view(request=request) - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetMaterializedView_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_materialized_view_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_materialized_view_sync.py deleted file mode 100644 index 1f94e3954..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_materialized_view_sync.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for GetMaterializedView -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetMaterializedView_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -def sample_get_materialized_view(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.GetMaterializedViewRequest( - name="name_value", - ) - - # Make the request - response = client.get_materialized_view(request=request) - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetMaterializedView_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_app_profiles_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_app_profiles_async.py deleted file mode 100644 index d377fc678..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_app_profiles_async.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for ListAppProfiles -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListAppProfiles_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -async def sample_list_app_profiles(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.ListAppProfilesRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_app_profiles(request=request) - - # Handle the response - async for response in page_result: - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListAppProfiles_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_app_profiles_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_app_profiles_sync.py deleted file mode 100644 index 07f49ba39..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_app_profiles_sync.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for ListAppProfiles -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListAppProfiles_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -def sample_list_app_profiles(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.ListAppProfilesRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_app_profiles(request=request) - - # Handle the response - for response in page_result: - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListAppProfiles_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_clusters_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_clusters_async.py deleted file mode 100644 index 71532d98a..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_clusters_async.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for ListClusters -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListClusters_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -async def sample_list_clusters(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.ListClustersRequest( - parent="parent_value", - ) - - # Make the request - response = await client.list_clusters(request=request) - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListClusters_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_clusters_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_clusters_sync.py deleted file mode 100644 index 1c36c098d..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_clusters_sync.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for ListClusters -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListClusters_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -def sample_list_clusters(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.ListClustersRequest( - parent="parent_value", - ) - - # Make the request - response = client.list_clusters(request=request) - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListClusters_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_hot_tablets_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_hot_tablets_async.py deleted file mode 100644 index cb6d58847..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_hot_tablets_async.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for ListHotTablets -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListHotTablets_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -async def sample_list_hot_tablets(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.ListHotTabletsRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_hot_tablets(request=request) - - # Handle the response - async for response in page_result: - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListHotTablets_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_hot_tablets_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_hot_tablets_sync.py deleted file mode 100644 index 5add7715d..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_hot_tablets_sync.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for ListHotTablets -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListHotTablets_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -def sample_list_hot_tablets(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.ListHotTabletsRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_hot_tablets(request=request) - - # Handle the response - for response in page_result: - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListHotTablets_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_instances_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_instances_async.py deleted file mode 100644 index 91c9a8230..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_instances_async.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for ListInstances -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListInstances_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -async def sample_list_instances(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.ListInstancesRequest( - parent="parent_value", - ) - - # Make the request - response = await client.list_instances(request=request) - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListInstances_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_instances_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_instances_sync.py deleted file mode 100644 index bbe708c0e..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_instances_sync.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for ListInstances -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListInstances_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -def sample_list_instances(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.ListInstancesRequest( - parent="parent_value", - ) - - # Make the request - response = client.list_instances(request=request) - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListInstances_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_logical_views_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_logical_views_async.py deleted file mode 100644 index 8de9bd06e..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_logical_views_async.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for ListLogicalViews -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListLogicalViews_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -async def sample_list_logical_views(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.ListLogicalViewsRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_logical_views(request=request) - - # Handle the response - async for response in page_result: - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListLogicalViews_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_logical_views_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_logical_views_sync.py deleted file mode 100644 index b5fb602cd..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_logical_views_sync.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for ListLogicalViews -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListLogicalViews_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -def sample_list_logical_views(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.ListLogicalViewsRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_logical_views(request=request) - - # Handle the response - for response in page_result: - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListLogicalViews_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_materialized_views_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_materialized_views_async.py deleted file mode 100644 index 6fa672a25..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_materialized_views_async.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for ListMaterializedViews -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListMaterializedViews_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -async def sample_list_materialized_views(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.ListMaterializedViewsRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_materialized_views(request=request) - - # Handle the response - async for response in page_result: - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListMaterializedViews_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_materialized_views_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_materialized_views_sync.py deleted file mode 100644 index 5a25da88a..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_materialized_views_sync.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for ListMaterializedViews -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListMaterializedViews_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -def sample_list_materialized_views(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.ListMaterializedViewsRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_materialized_views(request=request) - - # Handle the response - for response in page_result: - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListMaterializedViews_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_cluster_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_cluster_async.py deleted file mode 100644 index dab73b9cb..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_cluster_async.py +++ /dev/null @@ -1,55 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for PartialUpdateCluster -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateCluster_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -async def sample_partial_update_cluster(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.PartialUpdateClusterRequest( - ) - - # Make the request - operation = client.partial_update_cluster(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateCluster_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_cluster_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_cluster_sync.py deleted file mode 100644 index bab63c6ed..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_cluster_sync.py +++ /dev/null @@ -1,55 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for PartialUpdateCluster -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateCluster_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -def sample_partial_update_cluster(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.PartialUpdateClusterRequest( - ) - - # Make the request - operation = client.partial_update_cluster(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateCluster_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_instance_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_instance_async.py deleted file mode 100644 index 4c5e53ebe..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_instance_async.py +++ /dev/null @@ -1,59 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for PartialUpdateInstance -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateInstance_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -async def sample_partial_update_instance(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - instance = bigtable_admin_v2.Instance() - instance.display_name = "display_name_value" - - request = bigtable_admin_v2.PartialUpdateInstanceRequest( - instance=instance, - ) - - # Make the request - operation = client.partial_update_instance(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateInstance_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_instance_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_instance_sync.py deleted file mode 100644 index 0d2a74cfc..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_instance_sync.py +++ /dev/null @@ -1,59 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for PartialUpdateInstance -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateInstance_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -def sample_partial_update_instance(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - instance = bigtable_admin_v2.Instance() - instance.display_name = "display_name_value" - - request = bigtable_admin_v2.PartialUpdateInstanceRequest( - instance=instance, - ) - - # Make the request - operation = client.partial_update_instance(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateInstance_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_set_iam_policy_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_set_iam_policy_async.py deleted file mode 100644 index b389b7679..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_set_iam_policy_async.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for SetIamPolicy -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_SetIamPolicy_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 -from google.iam.v1 import iam_policy_pb2 # type: ignore - - -async def sample_set_iam_policy(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - request = iam_policy_pb2.SetIamPolicyRequest( - resource="resource_value", - ) - - # Make the request - response = await client.set_iam_policy(request=request) - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_SetIamPolicy_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_set_iam_policy_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_set_iam_policy_sync.py deleted file mode 100644 index 97bc29d65..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_set_iam_policy_sync.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for SetIamPolicy -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_SetIamPolicy_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 -from google.iam.v1 import iam_policy_pb2 # type: ignore - - -def sample_set_iam_policy(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - request = iam_policy_pb2.SetIamPolicyRequest( - resource="resource_value", - ) - - # Make the request - response = client.set_iam_policy(request=request) - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_SetIamPolicy_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_test_iam_permissions_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_test_iam_permissions_async.py deleted file mode 100644 index 977f79d9b..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_test_iam_permissions_async.py +++ /dev/null @@ -1,54 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for TestIamPermissions -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_TestIamPermissions_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 -from google.iam.v1 import iam_policy_pb2 # type: ignore - - -async def sample_test_iam_permissions(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - request = iam_policy_pb2.TestIamPermissionsRequest( - resource="resource_value", - permissions=['permissions_value1', 'permissions_value2'], - ) - - # Make the request - response = await client.test_iam_permissions(request=request) - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_TestIamPermissions_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_test_iam_permissions_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_test_iam_permissions_sync.py deleted file mode 100644 index db047d367..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_test_iam_permissions_sync.py +++ /dev/null @@ -1,54 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for TestIamPermissions -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_TestIamPermissions_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 -from google.iam.v1 import iam_policy_pb2 # type: ignore - - -def sample_test_iam_permissions(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - request = iam_policy_pb2.TestIamPermissionsRequest( - resource="resource_value", - permissions=['permissions_value1', 'permissions_value2'], - ) - - # Make the request - response = client.test_iam_permissions(request=request) - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_TestIamPermissions_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_app_profile_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_app_profile_async.py deleted file mode 100644 index 2c55a45bd..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_app_profile_async.py +++ /dev/null @@ -1,59 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for UpdateAppProfile -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateAppProfile_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -async def sample_update_app_profile(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - app_profile = bigtable_admin_v2.AppProfile() - app_profile.priority = "PRIORITY_HIGH" - - request = bigtable_admin_v2.UpdateAppProfileRequest( - app_profile=app_profile, - ) - - # Make the request - operation = client.update_app_profile(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateAppProfile_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_app_profile_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_app_profile_sync.py deleted file mode 100644 index a7b683426..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_app_profile_sync.py +++ /dev/null @@ -1,59 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for UpdateAppProfile -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateAppProfile_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -def sample_update_app_profile(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - app_profile = bigtable_admin_v2.AppProfile() - app_profile.priority = "PRIORITY_HIGH" - - request = bigtable_admin_v2.UpdateAppProfileRequest( - app_profile=app_profile, - ) - - # Make the request - operation = client.update_app_profile(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateAppProfile_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_cluster_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_cluster_async.py deleted file mode 100644 index af3abde41..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_cluster_async.py +++ /dev/null @@ -1,55 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for UpdateCluster -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateCluster_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -async def sample_update_cluster(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.Cluster( - ) - - # Make the request - operation = client.update_cluster(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateCluster_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_cluster_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_cluster_sync.py deleted file mode 100644 index ec02a64af..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_cluster_sync.py +++ /dev/null @@ -1,55 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for UpdateCluster -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateCluster_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -def sample_update_cluster(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.Cluster( - ) - - # Make the request - operation = client.update_cluster(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateCluster_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_instance_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_instance_async.py deleted file mode 100644 index 798afaf80..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_instance_async.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for UpdateInstance -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateInstance_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -async def sample_update_instance(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.Instance( - display_name="display_name_value", - ) - - # Make the request - response = await client.update_instance(request=request) - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateInstance_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_instance_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_instance_sync.py deleted file mode 100644 index fb6e5e2d3..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_instance_sync.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for UpdateInstance -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateInstance_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -def sample_update_instance(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.Instance( - display_name="display_name_value", - ) - - # Make the request - response = client.update_instance(request=request) - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateInstance_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_logical_view_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_logical_view_async.py deleted file mode 100644 index 9bdd620e6..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_logical_view_async.py +++ /dev/null @@ -1,59 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for UpdateLogicalView -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateLogicalView_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -async def sample_update_logical_view(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - logical_view = bigtable_admin_v2.LogicalView() - logical_view.query = "query_value" - - request = bigtable_admin_v2.UpdateLogicalViewRequest( - logical_view=logical_view, - ) - - # Make the request - operation = client.update_logical_view(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateLogicalView_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_logical_view_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_logical_view_sync.py deleted file mode 100644 index 10d962205..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_logical_view_sync.py +++ /dev/null @@ -1,59 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for UpdateLogicalView -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateLogicalView_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -def sample_update_logical_view(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - logical_view = bigtable_admin_v2.LogicalView() - logical_view.query = "query_value" - - request = bigtable_admin_v2.UpdateLogicalViewRequest( - logical_view=logical_view, - ) - - # Make the request - operation = client.update_logical_view(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateLogicalView_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_materialized_view_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_materialized_view_async.py deleted file mode 100644 index ddd930475..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_materialized_view_async.py +++ /dev/null @@ -1,59 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for UpdateMaterializedView -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateMaterializedView_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -async def sample_update_materialized_view(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() - - # Initialize request argument(s) - materialized_view = bigtable_admin_v2.MaterializedView() - materialized_view.query = "query_value" - - request = bigtable_admin_v2.UpdateMaterializedViewRequest( - materialized_view=materialized_view, - ) - - # Make the request - operation = client.update_materialized_view(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateMaterializedView_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_materialized_view_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_materialized_view_sync.py deleted file mode 100644 index a2ef78bd3..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_materialized_view_sync.py +++ /dev/null @@ -1,59 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for UpdateMaterializedView -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateMaterializedView_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -def sample_update_materialized_view(): - # Create a client - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Initialize request argument(s) - materialized_view = bigtable_admin_v2.MaterializedView() - materialized_view.query = "query_value" - - request = bigtable_admin_v2.UpdateMaterializedViewRequest( - materialized_view=materialized_view, - ) - - # Make the request - operation = client.update_materialized_view(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateMaterializedView_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_check_consistency_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_check_consistency_async.py deleted file mode 100644 index 4cd57edc8..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_check_consistency_async.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for CheckConsistency -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableTableAdmin_CheckConsistency_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -async def sample_check_consistency(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.CheckConsistencyRequest( - name="name_value", - consistency_token="consistency_token_value", - ) - - # Make the request - response = await client.check_consistency(request=request) - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableTableAdmin_CheckConsistency_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_check_consistency_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_check_consistency_sync.py deleted file mode 100644 index ec6085b3f..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_check_consistency_sync.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for CheckConsistency -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableTableAdmin_CheckConsistency_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -def sample_check_consistency(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.CheckConsistencyRequest( - name="name_value", - consistency_token="consistency_token_value", - ) - - # Make the request - response = client.check_consistency(request=request) - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableTableAdmin_CheckConsistency_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_copy_backup_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_copy_backup_async.py deleted file mode 100644 index 9355b7d44..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_copy_backup_async.py +++ /dev/null @@ -1,58 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for CopyBackup -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableTableAdmin_CopyBackup_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -async def sample_copy_backup(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.CopyBackupRequest( - parent="parent_value", - backup_id="backup_id_value", - source_backup="source_backup_value", - ) - - # Make the request - operation = client.copy_backup(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableTableAdmin_CopyBackup_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_copy_backup_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_copy_backup_sync.py deleted file mode 100644 index 25456ad21..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_copy_backup_sync.py +++ /dev/null @@ -1,58 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for CopyBackup -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableTableAdmin_CopyBackup_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -def sample_copy_backup(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.CopyBackupRequest( - parent="parent_value", - backup_id="backup_id_value", - source_backup="source_backup_value", - ) - - # Make the request - operation = client.copy_backup(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableTableAdmin_CopyBackup_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_authorized_view_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_authorized_view_async.py deleted file mode 100644 index 135bbe220..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_authorized_view_async.py +++ /dev/null @@ -1,57 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for CreateAuthorizedView -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableTableAdmin_CreateAuthorizedView_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -async def sample_create_authorized_view(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.CreateAuthorizedViewRequest( - parent="parent_value", - authorized_view_id="authorized_view_id_value", - ) - - # Make the request - operation = client.create_authorized_view(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableTableAdmin_CreateAuthorizedView_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_authorized_view_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_authorized_view_sync.py deleted file mode 100644 index cafbf56cb..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_authorized_view_sync.py +++ /dev/null @@ -1,57 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for CreateAuthorizedView -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableTableAdmin_CreateAuthorizedView_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -def sample_create_authorized_view(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.CreateAuthorizedViewRequest( - parent="parent_value", - authorized_view_id="authorized_view_id_value", - ) - - # Make the request - operation = client.create_authorized_view(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableTableAdmin_CreateAuthorizedView_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_backup_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_backup_async.py deleted file mode 100644 index d9bd402b4..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_backup_async.py +++ /dev/null @@ -1,61 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for CreateBackup -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableTableAdmin_CreateBackup_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -async def sample_create_backup(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - backup = bigtable_admin_v2.Backup() - backup.source_table = "source_table_value" - - request = bigtable_admin_v2.CreateBackupRequest( - parent="parent_value", - backup_id="backup_id_value", - backup=backup, - ) - - # Make the request - operation = client.create_backup(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableTableAdmin_CreateBackup_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_backup_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_backup_sync.py deleted file mode 100644 index 835f0573c..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_backup_sync.py +++ /dev/null @@ -1,61 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for CreateBackup -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableTableAdmin_CreateBackup_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -def sample_create_backup(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - backup = bigtable_admin_v2.Backup() - backup.source_table = "source_table_value" - - request = bigtable_admin_v2.CreateBackupRequest( - parent="parent_value", - backup_id="backup_id_value", - backup=backup, - ) - - # Make the request - operation = client.create_backup(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableTableAdmin_CreateBackup_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_schema_bundle_async_internal.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_schema_bundle_async_internal.py deleted file mode 100644 index cbcf2aef2..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_schema_bundle_async_internal.py +++ /dev/null @@ -1,61 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for CreateSchemaBundle -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableTableAdmin_CreateSchemaBundle_async_internal] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -async def sample_create_schema_bundle(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - schema_bundle = bigtable_admin_v2.SchemaBundle() - schema_bundle.proto_schema.proto_descriptors = b'proto_descriptors_blob' - - request = bigtable_admin_v2.CreateSchemaBundleRequest( - parent="parent_value", - schema_bundle_id="schema_bundle_id_value", - schema_bundle=schema_bundle, - ) - - # Make the request - operation = client._create_schema_bundle(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableTableAdmin_CreateSchemaBundle_async_internal] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_schema_bundle_sync_internal.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_schema_bundle_sync_internal.py deleted file mode 100644 index 6f67869eb..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_schema_bundle_sync_internal.py +++ /dev/null @@ -1,61 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for CreateSchemaBundle -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableTableAdmin_CreateSchemaBundle_sync_internal] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -def sample_create_schema_bundle(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - schema_bundle = bigtable_admin_v2.SchemaBundle() - schema_bundle.proto_schema.proto_descriptors = b'proto_descriptors_blob' - - request = bigtable_admin_v2.CreateSchemaBundleRequest( - parent="parent_value", - schema_bundle_id="schema_bundle_id_value", - schema_bundle=schema_bundle, - ) - - # Make the request - operation = client._create_schema_bundle(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableTableAdmin_CreateSchemaBundle_sync_internal] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_async.py deleted file mode 100644 index 3096539b9..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_async.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for CreateTable -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableTableAdmin_CreateTable_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -async def sample_create_table(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.CreateTableRequest( - parent="parent_value", - table_id="table_id_value", - ) - - # Make the request - response = await client.create_table(request=request) - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableTableAdmin_CreateTable_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_from_snapshot_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_from_snapshot_async.py deleted file mode 100644 index f7767438e..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_from_snapshot_async.py +++ /dev/null @@ -1,58 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for CreateTableFromSnapshot -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableTableAdmin_CreateTableFromSnapshot_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -async def sample_create_table_from_snapshot(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.CreateTableFromSnapshotRequest( - parent="parent_value", - table_id="table_id_value", - source_snapshot="source_snapshot_value", - ) - - # Make the request - operation = client.create_table_from_snapshot(request=request) - - print("Waiting for operation to complete...") - - response = (await operation).result() - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableTableAdmin_CreateTableFromSnapshot_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_from_snapshot_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_from_snapshot_sync.py deleted file mode 100644 index ff1dd7899..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_from_snapshot_sync.py +++ /dev/null @@ -1,58 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for CreateTableFromSnapshot -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableTableAdmin_CreateTableFromSnapshot_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -def sample_create_table_from_snapshot(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.CreateTableFromSnapshotRequest( - parent="parent_value", - table_id="table_id_value", - source_snapshot="source_snapshot_value", - ) - - # Make the request - operation = client.create_table_from_snapshot(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableTableAdmin_CreateTableFromSnapshot_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_sync.py deleted file mode 100644 index 552a1095f..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_sync.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for CreateTable -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableTableAdmin_CreateTable_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -def sample_create_table(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.CreateTableRequest( - parent="parent_value", - table_id="table_id_value", - ) - - # Make the request - response = client.create_table(request=request) - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableTableAdmin_CreateTable_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_authorized_view_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_authorized_view_async.py deleted file mode 100644 index cbee06ae1..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_authorized_view_async.py +++ /dev/null @@ -1,50 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for DeleteAuthorizedView -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableTableAdmin_DeleteAuthorizedView_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -async def sample_delete_authorized_view(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.DeleteAuthorizedViewRequest( - name="name_value", - ) - - # Make the request - await client.delete_authorized_view(request=request) - - -# [END bigtableadmin_v2_generated_BigtableTableAdmin_DeleteAuthorizedView_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_authorized_view_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_authorized_view_sync.py deleted file mode 100644 index 298e66efb..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_authorized_view_sync.py +++ /dev/null @@ -1,50 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for DeleteAuthorizedView -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableTableAdmin_DeleteAuthorizedView_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -def sample_delete_authorized_view(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.DeleteAuthorizedViewRequest( - name="name_value", - ) - - # Make the request - client.delete_authorized_view(request=request) - - -# [END bigtableadmin_v2_generated_BigtableTableAdmin_DeleteAuthorizedView_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_backup_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_backup_async.py deleted file mode 100644 index d2615f792..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_backup_async.py +++ /dev/null @@ -1,50 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for DeleteBackup -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableTableAdmin_DeleteBackup_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -async def sample_delete_backup(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.DeleteBackupRequest( - name="name_value", - ) - - # Make the request - await client.delete_backup(request=request) - - -# [END bigtableadmin_v2_generated_BigtableTableAdmin_DeleteBackup_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_backup_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_backup_sync.py deleted file mode 100644 index c9888bf39..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_backup_sync.py +++ /dev/null @@ -1,50 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for DeleteBackup -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableTableAdmin_DeleteBackup_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -def sample_delete_backup(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.DeleteBackupRequest( - name="name_value", - ) - - # Make the request - client.delete_backup(request=request) - - -# [END bigtableadmin_v2_generated_BigtableTableAdmin_DeleteBackup_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_schema_bundle_async_internal.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_schema_bundle_async_internal.py deleted file mode 100644 index 3f4dc292c..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_schema_bundle_async_internal.py +++ /dev/null @@ -1,50 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for DeleteSchemaBundle -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSchemaBundle_async_internal] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -async def sample_delete_schema_bundle(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.DeleteSchemaBundleRequest( - name="name_value", - ) - - # Make the request - await client._delete_schema_bundle(request=request) - - -# [END bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSchemaBundle_async_internal] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_schema_bundle_sync_internal.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_schema_bundle_sync_internal.py deleted file mode 100644 index eaeeb6cad..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_schema_bundle_sync_internal.py +++ /dev/null @@ -1,50 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for DeleteSchemaBundle -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSchemaBundle_sync_internal] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -def sample_delete_schema_bundle(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.DeleteSchemaBundleRequest( - name="name_value", - ) - - # Make the request - client._delete_schema_bundle(request=request) - - -# [END bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSchemaBundle_sync_internal] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_snapshot_async_internal.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_snapshot_async_internal.py deleted file mode 100644 index c0d8b3279..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_snapshot_async_internal.py +++ /dev/null @@ -1,50 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for DeleteSnapshot -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSnapshot_async_internal] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -async def sample_delete_snapshot(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.DeleteSnapshotRequest( - name="name_value", - ) - - # Make the request - await client._delete_snapshot(request=request) - - -# [END bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSnapshot_async_internal] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_snapshot_sync_internal.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_snapshot_sync_internal.py deleted file mode 100644 index 114b8d1ed..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_snapshot_sync_internal.py +++ /dev/null @@ -1,50 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for DeleteSnapshot -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSnapshot_sync_internal] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -def sample_delete_snapshot(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.DeleteSnapshotRequest( - name="name_value", - ) - - # Make the request - client._delete_snapshot(request=request) - - -# [END bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSnapshot_sync_internal] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_table_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_table_async.py deleted file mode 100644 index 375e61557..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_table_async.py +++ /dev/null @@ -1,50 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for DeleteTable -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableTableAdmin_DeleteTable_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -async def sample_delete_table(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.DeleteTableRequest( - name="name_value", - ) - - # Make the request - await client.delete_table(request=request) - - -# [END bigtableadmin_v2_generated_BigtableTableAdmin_DeleteTable_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_table_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_table_sync.py deleted file mode 100644 index 17397bfab..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_table_sync.py +++ /dev/null @@ -1,50 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for DeleteTable -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableTableAdmin_DeleteTable_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -def sample_delete_table(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.DeleteTableRequest( - name="name_value", - ) - - # Make the request - client.delete_table(request=request) - - -# [END bigtableadmin_v2_generated_BigtableTableAdmin_DeleteTable_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_drop_row_range_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_drop_row_range_async.py deleted file mode 100644 index 391205c7c..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_drop_row_range_async.py +++ /dev/null @@ -1,51 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for DropRowRange -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableTableAdmin_DropRowRange_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -async def sample_drop_row_range(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.DropRowRangeRequest( - row_key_prefix=b'row_key_prefix_blob', - name="name_value", - ) - - # Make the request - await client.drop_row_range(request=request) - - -# [END bigtableadmin_v2_generated_BigtableTableAdmin_DropRowRange_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_drop_row_range_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_drop_row_range_sync.py deleted file mode 100644 index bcd528f1a..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_drop_row_range_sync.py +++ /dev/null @@ -1,51 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for DropRowRange -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableTableAdmin_DropRowRange_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -def sample_drop_row_range(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.DropRowRangeRequest( - row_key_prefix=b'row_key_prefix_blob', - name="name_value", - ) - - # Make the request - client.drop_row_range(request=request) - - -# [END bigtableadmin_v2_generated_BigtableTableAdmin_DropRowRange_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_generate_consistency_token_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_generate_consistency_token_async.py deleted file mode 100644 index 1953441b6..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_generate_consistency_token_async.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for GenerateConsistencyToken -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableTableAdmin_GenerateConsistencyToken_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -async def sample_generate_consistency_token(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.GenerateConsistencyTokenRequest( - name="name_value", - ) - - # Make the request - response = await client.generate_consistency_token(request=request) - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableTableAdmin_GenerateConsistencyToken_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_generate_consistency_token_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_generate_consistency_token_sync.py deleted file mode 100644 index 4ae52264d..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_generate_consistency_token_sync.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for GenerateConsistencyToken -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableTableAdmin_GenerateConsistencyToken_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -def sample_generate_consistency_token(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.GenerateConsistencyTokenRequest( - name="name_value", - ) - - # Make the request - response = client.generate_consistency_token(request=request) - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableTableAdmin_GenerateConsistencyToken_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_authorized_view_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_authorized_view_async.py deleted file mode 100644 index 129948bc5..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_authorized_view_async.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for GetAuthorizedView -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetAuthorizedView_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -async def sample_get_authorized_view(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.GetAuthorizedViewRequest( - name="name_value", - ) - - # Make the request - response = await client.get_authorized_view(request=request) - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetAuthorizedView_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_authorized_view_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_authorized_view_sync.py deleted file mode 100644 index 9cc63538c..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_authorized_view_sync.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for GetAuthorizedView -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetAuthorizedView_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -def sample_get_authorized_view(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.GetAuthorizedViewRequest( - name="name_value", - ) - - # Make the request - response = client.get_authorized_view(request=request) - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetAuthorizedView_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_backup_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_backup_async.py deleted file mode 100644 index 524d63e86..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_backup_async.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for GetBackup -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetBackup_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -async def sample_get_backup(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.GetBackupRequest( - name="name_value", - ) - - # Make the request - response = await client.get_backup(request=request) - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetBackup_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_backup_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_backup_sync.py deleted file mode 100644 index 5ed91b80c..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_backup_sync.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for GetBackup -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetBackup_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -def sample_get_backup(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.GetBackupRequest( - name="name_value", - ) - - # Make the request - response = client.get_backup(request=request) - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetBackup_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_iam_policy_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_iam_policy_async.py deleted file mode 100644 index a599239d5..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_iam_policy_async.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for GetIamPolicy -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetIamPolicy_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 -from google.iam.v1 import iam_policy_pb2 # type: ignore - - -async def sample_get_iam_policy(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = iam_policy_pb2.GetIamPolicyRequest( - resource="resource_value", - ) - - # Make the request - response = await client.get_iam_policy(request=request) - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetIamPolicy_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_iam_policy_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_iam_policy_sync.py deleted file mode 100644 index 2d6e71c27..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_iam_policy_sync.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for GetIamPolicy -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetIamPolicy_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 -from google.iam.v1 import iam_policy_pb2 # type: ignore - - -def sample_get_iam_policy(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = iam_policy_pb2.GetIamPolicyRequest( - resource="resource_value", - ) - - # Make the request - response = client.get_iam_policy(request=request) - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetIamPolicy_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_schema_bundle_async_internal.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_schema_bundle_async_internal.py deleted file mode 100644 index 3bf768d54..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_schema_bundle_async_internal.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for GetSchemaBundle -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetSchemaBundle_async_internal] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -async def sample_get_schema_bundle(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.GetSchemaBundleRequest( - name="name_value", - ) - - # Make the request - response = await client._get_schema_bundle(request=request) - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetSchemaBundle_async_internal] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_schema_bundle_sync_internal.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_schema_bundle_sync_internal.py deleted file mode 100644 index 1f9a02521..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_schema_bundle_sync_internal.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for GetSchemaBundle -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetSchemaBundle_sync_internal] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -def sample_get_schema_bundle(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.GetSchemaBundleRequest( - name="name_value", - ) - - # Make the request - response = client._get_schema_bundle(request=request) - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetSchemaBundle_sync_internal] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_snapshot_async_internal.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_snapshot_async_internal.py deleted file mode 100644 index a9981a3c5..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_snapshot_async_internal.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for GetSnapshot -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetSnapshot_async_internal] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -async def sample_get_snapshot(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.GetSnapshotRequest( - name="name_value", - ) - - # Make the request - response = await client._get_snapshot(request=request) - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetSnapshot_async_internal] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_snapshot_sync_internal.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_snapshot_sync_internal.py deleted file mode 100644 index 6e2225b3c..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_snapshot_sync_internal.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for GetSnapshot -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetSnapshot_sync_internal] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -def sample_get_snapshot(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.GetSnapshotRequest( - name="name_value", - ) - - # Make the request - response = client._get_snapshot(request=request) - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetSnapshot_sync_internal] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_table_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_table_async.py deleted file mode 100644 index ff8dff1ae..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_table_async.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for GetTable -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetTable_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. -# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -async def sample_get_table(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.GetTableRequest( - name="name_value", - ) - - # Make the request - response = await client.get_table(request=request) - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetTable_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_table_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_table_sync.py deleted file mode 100644 index ccb68b766..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_table_sync.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for GetTable -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetTable_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -def sample_get_table(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.GetTableRequest( - name="name_value", - ) - - # Make the request - response = client.get_table(request=request) - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetTable_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_authorized_views_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_authorized_views_async.py deleted file mode 100644 index 658b8f96a..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_authorized_views_async.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for ListAuthorizedViews -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableTableAdmin_ListAuthorizedViews_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -async def sample_list_authorized_views(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.ListAuthorizedViewsRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_authorized_views(request=request) - - # Handle the response - async for response in page_result: - print(response) - -# [END bigtableadmin_v2_generated_BigtableTableAdmin_ListAuthorizedViews_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_authorized_views_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_authorized_views_sync.py deleted file mode 100644 index a7bf4b6ad..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_authorized_views_sync.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for ListAuthorizedViews -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableTableAdmin_ListAuthorizedViews_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -def sample_list_authorized_views(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.ListAuthorizedViewsRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_authorized_views(request=request) - - # Handle the response - for response in page_result: - print(response) - -# [END bigtableadmin_v2_generated_BigtableTableAdmin_ListAuthorizedViews_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_backups_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_backups_async.py deleted file mode 100644 index 368c376f0..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_backups_async.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for ListBackups -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableTableAdmin_ListBackups_async] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -async def sample_list_backups(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.ListBackupsRequest( - parent="parent_value", - ) - - # Make the request - page_result = client.list_backups(request=request) - - # Handle the response - async for response in page_result: - print(response) - -# [END bigtableadmin_v2_generated_BigtableTableAdmin_ListBackups_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_backups_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_backups_sync.py deleted file mode 100644 index ca0e3e0f2..000000000 --- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_backups_sync.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generated code. DO NOT EDIT! -# -# Snippet for ListBackups -# NOTE: This snippet has been automatically generated for illustrative purposes only. -# It may require modifications to work in your environment. - -# To install the latest published package dependency, execute the following: -# python3 -m pip install google-cloud-bigtable-admin - - -# [START bigtableadmin_v2_generated_BigtableTableAdmin_ListBackups_sync] -# This snippet has been automatically generated and should be regarded as a -# code template only. -# It will require modifications to work: -# - It may require correct/in-range values for request initialization. 
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import bigtable_admin_v2
-
-
-def sample_list_backups():
-    # Create a client
-    client = bigtable_admin_v2.BigtableTableAdminClient()
-
-    # Initialize request argument(s)
-    request = bigtable_admin_v2.ListBackupsRequest(
-        parent="parent_value",
-    )
-
-    # Make the request
-    page_result = client.list_backups(request=request)
-
-    # Handle the response
-    for response in page_result:
-        print(response)
-
-# [END bigtableadmin_v2_generated_BigtableTableAdmin_ListBackups_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_schema_bundles_async_internal.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_schema_bundles_async_internal.py
deleted file mode 100644
index 7e56085c5..000000000
--- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_schema_bundles_async_internal.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2025 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for ListSchemaBundles
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-# python3 -m pip install google-cloud-bigtable-admin
-
-
-# [START bigtableadmin_v2_generated_BigtableTableAdmin_ListSchemaBundles_async_internal]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import bigtable_admin_v2
-
-
-async def sample_list_schema_bundles():
-    # Create a client
-    client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
-
-    # Initialize request argument(s)
-    request = bigtable_admin_v2.ListSchemaBundlesRequest(
-        parent="parent_value",
-    )
-
-    # Make the request
-    page_result = client._list_schema_bundles(request=request)
-
-    # Handle the response
-    async for response in page_result:
-        print(response)
-
-# [END bigtableadmin_v2_generated_BigtableTableAdmin_ListSchemaBundles_async_internal]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_schema_bundles_sync_internal.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_schema_bundles_sync_internal.py
deleted file mode 100644
index 7e75c0f69..000000000
--- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_schema_bundles_sync_internal.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2025 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for ListSchemaBundles
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-# python3 -m pip install google-cloud-bigtable-admin
-
-
-# [START bigtableadmin_v2_generated_BigtableTableAdmin_ListSchemaBundles_sync_internal]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import bigtable_admin_v2
-
-
-def sample_list_schema_bundles():
-    # Create a client
-    client = bigtable_admin_v2.BigtableTableAdminClient()
-
-    # Initialize request argument(s)
-    request = bigtable_admin_v2.ListSchemaBundlesRequest(
-        parent="parent_value",
-    )
-
-    # Make the request
-    page_result = client._list_schema_bundles(request=request)
-
-    # Handle the response
-    for response in page_result:
-        print(response)
-
-# [END bigtableadmin_v2_generated_BigtableTableAdmin_ListSchemaBundles_sync_internal]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_snapshots_async_internal.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_snapshots_async_internal.py
deleted file mode 100644
index a4deda5e7..000000000
--- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_snapshots_async_internal.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2025 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for ListSnapshots
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-# python3 -m pip install google-cloud-bigtable-admin
-
-
-# [START bigtableadmin_v2_generated_BigtableTableAdmin_ListSnapshots_async_internal]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import bigtable_admin_v2
-
-
-async def sample_list_snapshots():
-    # Create a client
-    client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
-
-    # Initialize request argument(s)
-    request = bigtable_admin_v2.ListSnapshotsRequest(
-        parent="parent_value",
-    )
-
-    # Make the request
-    page_result = client._list_snapshots(request=request)
-
-    # Handle the response
-    async for response in page_result:
-        print(response)
-
-# [END bigtableadmin_v2_generated_BigtableTableAdmin_ListSnapshots_async_internal]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_snapshots_sync_internal.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_snapshots_sync_internal.py
deleted file mode 100644
index f3e034e29..000000000
--- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_snapshots_sync_internal.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2025 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for ListSnapshots
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-# python3 -m pip install google-cloud-bigtable-admin
-
-
-# [START bigtableadmin_v2_generated_BigtableTableAdmin_ListSnapshots_sync_internal]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import bigtable_admin_v2
-
-
-def sample_list_snapshots():
-    # Create a client
-    client = bigtable_admin_v2.BigtableTableAdminClient()
-
-    # Initialize request argument(s)
-    request = bigtable_admin_v2.ListSnapshotsRequest(
-        parent="parent_value",
-    )
-
-    # Make the request
-    page_result = client._list_snapshots(request=request)
-
-    # Handle the response
-    for response in page_result:
-        print(response)
-
-# [END bigtableadmin_v2_generated_BigtableTableAdmin_ListSnapshots_sync_internal]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_tables_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_tables_async.py
deleted file mode 100644
index 191de0fc7..000000000
--- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_tables_async.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2025 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for ListTables
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-# python3 -m pip install google-cloud-bigtable-admin
-
-
-# [START bigtableadmin_v2_generated_BigtableTableAdmin_ListTables_async]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import bigtable_admin_v2
-
-
-async def sample_list_tables():
-    # Create a client
-    client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
-
-    # Initialize request argument(s)
-    request = bigtable_admin_v2.ListTablesRequest(
-        parent="parent_value",
-    )
-
-    # Make the request
-    page_result = client.list_tables(request=request)
-
-    # Handle the response
-    async for response in page_result:
-        print(response)
-
-# [END bigtableadmin_v2_generated_BigtableTableAdmin_ListTables_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_tables_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_tables_sync.py
deleted file mode 100644
index 5d0f3a278..000000000
--- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_tables_sync.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2025 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for ListTables
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-# python3 -m pip install google-cloud-bigtable-admin
-
-
-# [START bigtableadmin_v2_generated_BigtableTableAdmin_ListTables_sync]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import bigtable_admin_v2
-
-
-def sample_list_tables():
-    # Create a client
-    client = bigtable_admin_v2.BigtableTableAdminClient()
-
-    # Initialize request argument(s)
-    request = bigtable_admin_v2.ListTablesRequest(
-        parent="parent_value",
-    )
-
-    # Make the request
-    page_result = client.list_tables(request=request)
-
-    # Handle the response
-    for response in page_result:
-        print(response)
-
-# [END bigtableadmin_v2_generated_BigtableTableAdmin_ListTables_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_modify_column_families_async_internal.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_modify_column_families_async_internal.py
deleted file mode 100644
index 964ecbd6a..000000000
--- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_modify_column_families_async_internal.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2025 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for ModifyColumnFamilies
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-# python3 -m pip install google-cloud-bigtable-admin
-
-
-# [START bigtableadmin_v2_generated_BigtableTableAdmin_ModifyColumnFamilies_async_internal]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import bigtable_admin_v2
-
-
-async def sample_modify_column_families():
-    # Create a client
-    client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
-
-    # Initialize request argument(s)
-    request = bigtable_admin_v2.ModifyColumnFamiliesRequest(
-        name="name_value",
-    )
-
-    # Make the request
-    response = await client._modify_column_families(request=request)
-
-    # Handle the response
-    print(response)
-
-# [END bigtableadmin_v2_generated_BigtableTableAdmin_ModifyColumnFamilies_async_internal]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_modify_column_families_sync_internal.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_modify_column_families_sync_internal.py
deleted file mode 100644
index 4c80cecfd..000000000
--- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_modify_column_families_sync_internal.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2025 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for ModifyColumnFamilies
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-# python3 -m pip install google-cloud-bigtable-admin
-
-
-# [START bigtableadmin_v2_generated_BigtableTableAdmin_ModifyColumnFamilies_sync_internal]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import bigtable_admin_v2
-
-
-def sample_modify_column_families():
-    # Create a client
-    client = bigtable_admin_v2.BigtableTableAdminClient()
-
-    # Initialize request argument(s)
-    request = bigtable_admin_v2.ModifyColumnFamiliesRequest(
-        name="name_value",
-    )
-
-    # Make the request
-    response = client._modify_column_families(request=request)
-
-    # Handle the response
-    print(response)
-
-# [END bigtableadmin_v2_generated_BigtableTableAdmin_ModifyColumnFamilies_sync_internal]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_restore_table_async_internal.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_restore_table_async_internal.py
deleted file mode 100644
index f70b5da17..000000000
--- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_restore_table_async_internal.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2025 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for RestoreTable
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-# python3 -m pip install google-cloud-bigtable-admin
-
-
-# [START bigtableadmin_v2_generated_BigtableTableAdmin_RestoreTable_async_internal]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import bigtable_admin_v2
-
-
-async def sample_restore_table():
-    # Create a client
-    client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
-
-    # Initialize request argument(s)
-    request = bigtable_admin_v2.RestoreTableRequest(
-        backup="backup_value",
-        parent="parent_value",
-        table_id="table_id_value",
-    )
-
-    # Make the request
-    operation = client._restore_table(request=request)
-
-    print("Waiting for operation to complete...")
-
-    response = (await operation).result()
-
-    # Handle the response
-    print(response)
-
-# [END bigtableadmin_v2_generated_BigtableTableAdmin_RestoreTable_async_internal]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_restore_table_sync_internal.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_restore_table_sync_internal.py
deleted file mode 100644
index 45621c22b..000000000
--- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_restore_table_sync_internal.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2025 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for RestoreTable
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-# python3 -m pip install google-cloud-bigtable-admin
-
-
-# [START bigtableadmin_v2_generated_BigtableTableAdmin_RestoreTable_sync_internal]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import bigtable_admin_v2
-
-
-def sample_restore_table():
-    # Create a client
-    client = bigtable_admin_v2.BigtableTableAdminClient()
-
-    # Initialize request argument(s)
-    request = bigtable_admin_v2.RestoreTableRequest(
-        backup="backup_value",
-        parent="parent_value",
-        table_id="table_id_value",
-    )
-
-    # Make the request
-    operation = client._restore_table(request=request)
-
-    print("Waiting for operation to complete...")
-
-    response = operation.result()
-
-    # Handle the response
-    print(response)
-
-# [END bigtableadmin_v2_generated_BigtableTableAdmin_RestoreTable_sync_internal]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_set_iam_policy_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_set_iam_policy_async.py
deleted file mode 100644
index cbfafdc77..000000000
--- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_set_iam_policy_async.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2025 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for SetIamPolicy
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-# python3 -m pip install google-cloud-bigtable-admin
-
-
-# [START bigtableadmin_v2_generated_BigtableTableAdmin_SetIamPolicy_async]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import bigtable_admin_v2
-from google.iam.v1 import iam_policy_pb2  # type: ignore
-
-
-async def sample_set_iam_policy():
-    # Create a client
-    client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
-
-    # Initialize request argument(s)
-    request = iam_policy_pb2.SetIamPolicyRequest(
-        resource="resource_value",
-    )
-
-    # Make the request
-    response = await client.set_iam_policy(request=request)
-
-    # Handle the response
-    print(response)
-
-# [END bigtableadmin_v2_generated_BigtableTableAdmin_SetIamPolicy_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_set_iam_policy_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_set_iam_policy_sync.py
deleted file mode 100644
index 9a6c5fcc2..000000000
--- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_set_iam_policy_sync.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2025 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for SetIamPolicy
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-# python3 -m pip install google-cloud-bigtable-admin
-
-
-# [START bigtableadmin_v2_generated_BigtableTableAdmin_SetIamPolicy_sync]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import bigtable_admin_v2
-from google.iam.v1 import iam_policy_pb2  # type: ignore
-
-
-def sample_set_iam_policy():
-    # Create a client
-    client = bigtable_admin_v2.BigtableTableAdminClient()
-
-    # Initialize request argument(s)
-    request = iam_policy_pb2.SetIamPolicyRequest(
-        resource="resource_value",
-    )
-
-    # Make the request
-    response = client.set_iam_policy(request=request)
-
-    # Handle the response
-    print(response)
-
-# [END bigtableadmin_v2_generated_BigtableTableAdmin_SetIamPolicy_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_snapshot_table_async_internal.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_snapshot_table_async_internal.py
deleted file mode 100644
index c519c19e6..000000000
--- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_snapshot_table_async_internal.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2025 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for SnapshotTable
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-# python3 -m pip install google-cloud-bigtable-admin
-
-
-# [START bigtableadmin_v2_generated_BigtableTableAdmin_SnapshotTable_async_internal]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import bigtable_admin_v2
-
-
-async def sample_snapshot_table():
-    # Create a client
-    client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
-
-    # Initialize request argument(s)
-    request = bigtable_admin_v2.SnapshotTableRequest(
-        name="name_value",
-        cluster="cluster_value",
-        snapshot_id="snapshot_id_value",
-    )
-
-    # Make the request
-    operation = client._snapshot_table(request=request)
-
-    print("Waiting for operation to complete...")
-
-    response = (await operation).result()
-
-    # Handle the response
-    print(response)
-
-# [END bigtableadmin_v2_generated_BigtableTableAdmin_SnapshotTable_async_internal]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_snapshot_table_sync_internal.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_snapshot_table_sync_internal.py
deleted file mode 100644
index e3bfdd735..000000000
--- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_snapshot_table_sync_internal.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2025 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for SnapshotTable
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-# python3 -m pip install google-cloud-bigtable-admin
-
-
-# [START bigtableadmin_v2_generated_BigtableTableAdmin_SnapshotTable_sync_internal]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import bigtable_admin_v2
-
-
-def sample_snapshot_table():
-    # Create a client
-    client = bigtable_admin_v2.BigtableTableAdminClient()
-
-    # Initialize request argument(s)
-    request = bigtable_admin_v2.SnapshotTableRequest(
-        name="name_value",
-        cluster="cluster_value",
-        snapshot_id="snapshot_id_value",
-    )
-
-    # Make the request
-    operation = client._snapshot_table(request=request)
-
-    print("Waiting for operation to complete...")
-
-    response = operation.result()
-
-    # Handle the response
-    print(response)
-
-# [END bigtableadmin_v2_generated_BigtableTableAdmin_SnapshotTable_sync_internal]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_test_iam_permissions_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_test_iam_permissions_async.py
deleted file mode 100644
index ee5fe6027..000000000
--- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_test_iam_permissions_async.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2025 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for TestIamPermissions
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-# python3 -m pip install google-cloud-bigtable-admin
-
-
-# [START bigtableadmin_v2_generated_BigtableTableAdmin_TestIamPermissions_async]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import bigtable_admin_v2
-from google.iam.v1 import iam_policy_pb2  # type: ignore
-
-
-async def sample_test_iam_permissions():
-    # Create a client
-    client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
-
-    # Initialize request argument(s)
-    request = iam_policy_pb2.TestIamPermissionsRequest(
-        resource="resource_value",
-        permissions=['permissions_value1', 'permissions_value2'],
-    )
-
-    # Make the request
-    response = await client.test_iam_permissions(request=request)
-
-    # Handle the response
-    print(response)
-
-# [END bigtableadmin_v2_generated_BigtableTableAdmin_TestIamPermissions_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_test_iam_permissions_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_test_iam_permissions_sync.py
deleted file mode 100644
index 46f0870b0..000000000
--- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_test_iam_permissions_sync.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2025 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for TestIamPermissions
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-# python3 -m pip install google-cloud-bigtable-admin
-
-
-# [START bigtableadmin_v2_generated_BigtableTableAdmin_TestIamPermissions_sync]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import bigtable_admin_v2
-from google.iam.v1 import iam_policy_pb2  # type: ignore
-
-
-def sample_test_iam_permissions():
-    # Create a client
-    client = bigtable_admin_v2.BigtableTableAdminClient()
-
-    # Initialize request argument(s)
-    request = iam_policy_pb2.TestIamPermissionsRequest(
-        resource="resource_value",
-        permissions=['permissions_value1', 'permissions_value2'],
-    )
-
-    # Make the request
-    response = client.test_iam_permissions(request=request)
-
-    # Handle the response
-    print(response)
-
-# [END bigtableadmin_v2_generated_BigtableTableAdmin_TestIamPermissions_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_undelete_table_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_undelete_table_async.py
deleted file mode 100644
index 1e2f6aa5a..000000000
--- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_undelete_table_async.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2025 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for UndeleteTable
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-# python3 -m pip install google-cloud-bigtable-admin
-
-
-# [START bigtableadmin_v2_generated_BigtableTableAdmin_UndeleteTable_async]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import bigtable_admin_v2
-
-
-async def sample_undelete_table():
-    # Create a client
-    client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
-
-    # Initialize request argument(s)
-    request = bigtable_admin_v2.UndeleteTableRequest(
-        name="name_value",
-    )
-
-    # Make the request
-    operation = client.undelete_table(request=request)
-
-    print("Waiting for operation to complete...")
-
-    response = (await operation).result()
-
-    # Handle the response
-    print(response)
-
-# [END bigtableadmin_v2_generated_BigtableTableAdmin_UndeleteTable_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_undelete_table_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_undelete_table_sync.py
deleted file mode 100644
index 637afee8b..000000000
--- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_undelete_table_sync.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2025 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for UndeleteTable
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-# python3 -m pip install google-cloud-bigtable-admin
-
-
-# [START bigtableadmin_v2_generated_BigtableTableAdmin_UndeleteTable_sync]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import bigtable_admin_v2
-
-
-def sample_undelete_table():
-    # Create a client
-    client = bigtable_admin_v2.BigtableTableAdminClient()
-
-    # Initialize request argument(s)
-    request = bigtable_admin_v2.UndeleteTableRequest(
-        name="name_value",
-    )
-
-    # Make the request
-    operation = client.undelete_table(request=request)
-
-    print("Waiting for operation to complete...")
-
-    response = operation.result()
-
-    # Handle the response
-    print(response)
-
-# [END bigtableadmin_v2_generated_BigtableTableAdmin_UndeleteTable_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_authorized_view_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_authorized_view_async.py
deleted file mode 100644
index 541427d48..000000000
--- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_authorized_view_async.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2025 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for UpdateAuthorizedView
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-# python3 -m pip install google-cloud-bigtable-admin
-
-
-# [START bigtableadmin_v2_generated_BigtableTableAdmin_UpdateAuthorizedView_async]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import bigtable_admin_v2
-
-
-async def sample_update_authorized_view():
-    # Create a client
-    client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
-
-    # Initialize request argument(s)
-    request = bigtable_admin_v2.UpdateAuthorizedViewRequest(
-    )
-
-    # Make the request
-    operation = client.update_authorized_view(request=request)
-
-    print("Waiting for operation to complete...")
-
-    response = (await operation).result()
-
-    # Handle the response
-    print(response)
-
-# [END bigtableadmin_v2_generated_BigtableTableAdmin_UpdateAuthorizedView_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_authorized_view_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_authorized_view_sync.py
deleted file mode 100644
index 9c8198d9a..000000000
--- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_authorized_view_sync.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2025 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for UpdateAuthorizedView
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-# python3 -m pip install google-cloud-bigtable-admin
-
-
-# [START bigtableadmin_v2_generated_BigtableTableAdmin_UpdateAuthorizedView_sync]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import bigtable_admin_v2
-
-
-def sample_update_authorized_view():
-    # Create a client
-    client = bigtable_admin_v2.BigtableTableAdminClient()
-
-    # Initialize request argument(s)
-    request = bigtable_admin_v2.UpdateAuthorizedViewRequest(
-    )
-
-    # Make the request
-    operation = client.update_authorized_view(request=request)
-
-    print("Waiting for operation to complete...")
-
-    response = operation.result()
-
-    # Handle the response
-    print(response)
-
-# [END bigtableadmin_v2_generated_BigtableTableAdmin_UpdateAuthorizedView_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_backup_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_backup_async.py
deleted file mode 100644
index f98e1e33a..000000000
--- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_backup_async.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2025 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for UpdateBackup
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-# python3 -m pip install google-cloud-bigtable-admin
-
-
-# [START bigtableadmin_v2_generated_BigtableTableAdmin_UpdateBackup_async]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import bigtable_admin_v2
-
-
-async def sample_update_backup():
-    # Create a client
-    client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
-
-    # Initialize request argument(s)
-    backup = bigtable_admin_v2.Backup()
-    backup.source_table = "source_table_value"
-
-    request = bigtable_admin_v2.UpdateBackupRequest(
-        backup=backup,
-    )
-
-    # Make the request
-    response = await client.update_backup(request=request)
-
-    # Handle the response
-    print(response)
-
-# [END bigtableadmin_v2_generated_BigtableTableAdmin_UpdateBackup_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_backup_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_backup_sync.py
deleted file mode 100644
index 466a3decb..000000000
--- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_backup_sync.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2025 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for UpdateBackup
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-# python3 -m pip install google-cloud-bigtable-admin
-
-
-# [START bigtableadmin_v2_generated_BigtableTableAdmin_UpdateBackup_sync]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import bigtable_admin_v2
-
-
-def sample_update_backup():
-    # Create a client
-    client = bigtable_admin_v2.BigtableTableAdminClient()
-
-    # Initialize request argument(s)
-    backup = bigtable_admin_v2.Backup()
-    backup.source_table = "source_table_value"
-
-    request = bigtable_admin_v2.UpdateBackupRequest(
-        backup=backup,
-    )
-
-    # Make the request
-    response = client.update_backup(request=request)
-
-    # Handle the response
-    print(response)
-
-# [END bigtableadmin_v2_generated_BigtableTableAdmin_UpdateBackup_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_schema_bundle_async_internal.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_schema_bundle_async_internal.py
deleted file mode 100644
index e7f76f66e..000000000
--- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_schema_bundle_async_internal.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2025 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for UpdateSchemaBundle
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-# python3 -m pip install google-cloud-bigtable-admin
-
-
-# [START bigtableadmin_v2_generated_BigtableTableAdmin_UpdateSchemaBundle_async_internal]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-# client as shown in:
-# https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import bigtable_admin_v2
-
-
-async def sample_update_schema_bundle():
-    # Create a client
-    client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
-
-    # Initialize request argument(s)
-    schema_bundle = bigtable_admin_v2.SchemaBundle()
-    schema_bundle.proto_schema.proto_descriptors = b'proto_descriptors_blob'
-
-    request = bigtable_admin_v2.UpdateSchemaBundleRequest(
-        schema_bundle=schema_bundle,
-    )
-
-    # Make the request
-    operation = client._update_schema_bundle(request=request)
-
-    print("Waiting for operation to complete...")
-
-    response = (await operation).result()
-
-    # Handle the response
-    print(response)
-
-# [END bigtableadmin_v2_generated_BigtableTableAdmin_UpdateSchemaBundle_async_internal]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_schema_bundle_sync_internal.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_schema_bundle_sync_internal.py
deleted file mode 100644
index 74384eb13..000000000
--- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_schema_bundle_sync_internal.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2025 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for UpdateSchemaBundle
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-# python3 -m pip install google-cloud-bigtable-admin
-
-
-# [START bigtableadmin_v2_generated_BigtableTableAdmin_UpdateSchemaBundle_sync_internal]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-#   client as shown in:
-#   https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import bigtable_admin_v2
-
-
-def sample_update_schema_bundle():
-    # Create a client
-    client = bigtable_admin_v2.BigtableTableAdminClient()
-
-    # Initialize request argument(s)
-    schema_bundle = bigtable_admin_v2.SchemaBundle()
-    schema_bundle.proto_schema.proto_descriptors = b'proto_descriptors_blob'
-
-    request = bigtable_admin_v2.UpdateSchemaBundleRequest(
-        schema_bundle=schema_bundle,
-    )
-
-    # Make the request
-    operation = client._update_schema_bundle(request=request)
-
-    print("Waiting for operation to complete...")
-
-    response = operation.result()
-
-    # Handle the response
-    print(response)
-
-# [END bigtableadmin_v2_generated_BigtableTableAdmin_UpdateSchemaBundle_sync_internal]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_table_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_table_async.py
deleted file mode 100644
index 93839d36f..000000000
--- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_table_async.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2025 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for UpdateTable
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-#   python3 -m pip install google-cloud-bigtable-admin
-
-
-# [START bigtableadmin_v2_generated_BigtableTableAdmin_UpdateTable_async]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service
-#   client as shown in:
-#   https://googleapis.dev/python/google-api-core/latest/client_options.html
-from google.cloud import bigtable_admin_v2
-
-
-async def sample_update_table():
-    # Create a client
-    client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
-
-    # Initialize request argument(s)
-    request = bigtable_admin_v2.UpdateTableRequest(
-    )
-
-    # Make the request
-    operation = client.update_table(request=request)
-
-    print("Waiting for operation to complete...")
-
-    response = (await operation).result()
-
-    # Handle the response
-    print(response)
-
-# [END bigtableadmin_v2_generated_BigtableTableAdmin_UpdateTable_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_table_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_table_sync.py
deleted file mode 100644
index fea09f6a8..000000000
--- a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_table_sync.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2025 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Generated code. DO NOT EDIT!
-#
-# Snippet for UpdateTable
-# NOTE: This snippet has been automatically generated for illustrative purposes only.
-# It may require modifications to work in your environment.
-
-# To install the latest published package dependency, execute the following:
-#   python3 -m pip install google-cloud-bigtable-admin
-
-
-# [START bigtableadmin_v2_generated_BigtableTableAdmin_UpdateTable_sync]
-# This snippet has been automatically generated and should be regarded as a
-# code template only.
-# It will require modifications to work:
-# - It may require correct/in-range values for request initialization.
-# - It may require specifying regional endpoints when creating the service -# client as shown in: -# https://googleapis.dev/python/google-api-core/latest/client_options.html -from google.cloud import bigtable_admin_v2 - - -def sample_update_table(): - # Create a client - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Initialize request argument(s) - request = bigtable_admin_v2.UpdateTableRequest( - ) - - # Make the request - operation = client.update_table(request=request) - - print("Waiting for operation to complete...") - - response = operation.result() - - # Handle the response - print(response) - -# [END bigtableadmin_v2_generated_BigtableTableAdmin_UpdateTable_sync] diff --git a/samples/generated_samples/snippet_metadata_google.bigtable.admin.v2.json b/samples/generated_samples/snippet_metadata_google.bigtable.admin.v2.json deleted file mode 100644 index a9360940d..000000000 --- a/samples/generated_samples/snippet_metadata_google.bigtable.admin.v2.json +++ /dev/null @@ -1,10871 +0,0 @@ -{ - "clientLibrary": { - "apis": [ - { - "id": "google.bigtable.admin.v2", - "version": "v2" - } - ], - "language": "PYTHON", - "name": "google-cloud-bigtable-admin", - "version": "0.1.0" - }, - "snippets": [ - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", - "shortName": "BigtableInstanceAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.create_app_profile", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.CreateAppProfile", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "CreateAppProfile" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.CreateAppProfileRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "app_profile_id", - "type": "str" - }, - { - "name": "app_profile", - "type": "google.cloud.bigtable_admin_v2.types.AppProfile" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.types.AppProfile", - "shortName": "create_app_profile" - }, - "description": "Sample for CreateAppProfile", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_create_app_profile_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateAppProfile_async", - "segments": [ - { - "end": 56, - "start": 27, - "type": "FULL" - }, - { - "end": 56, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 50, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 53, - "start": 51, - "type": "REQUEST_EXECUTION" - }, - { - "end": 57, - "start": 54, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_create_app_profile_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", - "shortName": "BigtableInstanceAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.create_app_profile", - "method": { - "fullName": 
"google.bigtable.admin.v2.BigtableInstanceAdmin.CreateAppProfile", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "CreateAppProfile" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.CreateAppProfileRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "app_profile_id", - "type": "str" - }, - { - "name": "app_profile", - "type": "google.cloud.bigtable_admin_v2.types.AppProfile" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.types.AppProfile", - "shortName": "create_app_profile" - }, - "description": "Sample for CreateAppProfile", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_create_app_profile_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateAppProfile_sync", - "segments": [ - { - "end": 56, - "start": 27, - "type": "FULL" - }, - { - "end": 56, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 50, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 53, - "start": 51, - "type": "REQUEST_EXECUTION" - }, - { - "end": 57, - "start": 54, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_create_app_profile_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", - "shortName": "BigtableInstanceAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.create_cluster", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.CreateCluster", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "CreateCluster" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.CreateClusterRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "cluster_id", - "type": "str" - }, - { - "name": "cluster", - "type": "google.cloud.bigtable_admin_v2.types.Cluster" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "create_cluster" - }, - "description": "Sample for CreateCluster", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_create_cluster_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateCluster_async", - "segments": [ - { - "end": 56, - "start": 27, - "type": "FULL" - }, - { - "end": 56, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 46, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 53, - "start": 47, - "type": "REQUEST_EXECUTION" - }, - { - "end": 57, - "start": 54, - "type": "RESPONSE_HANDLING" - } - ], - "title": 
"bigtableadmin_v2_generated_bigtable_instance_admin_create_cluster_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", - "shortName": "BigtableInstanceAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.create_cluster", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.CreateCluster", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "CreateCluster" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.CreateClusterRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "cluster_id", - "type": "str" - }, - { - "name": "cluster", - "type": "google.cloud.bigtable_admin_v2.types.Cluster" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.api_core.operation.Operation", - "shortName": "create_cluster" - }, - "description": "Sample for CreateCluster", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_create_cluster_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateCluster_sync", - "segments": [ - { - "end": 56, - "start": 27, - "type": "FULL" - }, - { - "end": 56, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 46, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 53, - "start": 47, - "type": "REQUEST_EXECUTION" - }, - { - "end": 57, - "start": 54, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_create_cluster_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", - "shortName": "BigtableInstanceAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.create_instance", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.CreateInstance", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "CreateInstance" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.CreateInstanceRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "instance_id", - "type": "str" - }, - { - "name": "instance", - "type": "google.cloud.bigtable_admin_v2.types.Instance" - }, - { - "name": "clusters", - "type": "MutableMapping[str, google.cloud.bigtable_admin_v2.types.Cluster]" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "create_instance" - }, - "description": "Sample for CreateInstance", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_create_instance_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateInstance_async", - "segments": [ - { - 
"end": 60, - "start": 27, - "type": "FULL" - }, - { - "end": 60, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 50, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 57, - "start": 51, - "type": "REQUEST_EXECUTION" - }, - { - "end": 61, - "start": 58, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_create_instance_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", - "shortName": "BigtableInstanceAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.create_instance", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.CreateInstance", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "CreateInstance" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.CreateInstanceRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "instance_id", - "type": "str" - }, - { - "name": "instance", - "type": "google.cloud.bigtable_admin_v2.types.Instance" - }, - { - "name": "clusters", - "type": "MutableMapping[str, google.cloud.bigtable_admin_v2.types.Cluster]" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.api_core.operation.Operation", - "shortName": "create_instance" - }, - "description": "Sample for CreateInstance", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_create_instance_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateInstance_sync", - "segments": [ - { - "end": 60, - "start": 27, - "type": "FULL" - }, - { - "end": 60, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 50, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 57, - "start": 51, - "type": "REQUEST_EXECUTION" - }, - { - "end": 61, - "start": 58, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_create_instance_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", - "shortName": "BigtableInstanceAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.create_logical_view", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.CreateLogicalView", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "CreateLogicalView" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.CreateLogicalViewRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "logical_view", - "type": "google.cloud.bigtable_admin_v2.types.LogicalView" - }, - { - "name": "logical_view_id", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, 
Union[str, bytes]]]" - } - ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "create_logical_view" - }, - "description": "Sample for CreateLogicalView", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_create_logical_view_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateLogicalView_async", - "segments": [ - { - "end": 60, - "start": 27, - "type": "FULL" - }, - { - "end": 60, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 50, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 57, - "start": 51, - "type": "REQUEST_EXECUTION" - }, - { - "end": 61, - "start": 58, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_create_logical_view_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", - "shortName": "BigtableInstanceAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.create_logical_view", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.CreateLogicalView", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "CreateLogicalView" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.CreateLogicalViewRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "logical_view", - "type": "google.cloud.bigtable_admin_v2.types.LogicalView" - }, - { - "name": "logical_view_id", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.api_core.operation.Operation", - "shortName": "create_logical_view" - }, - "description": "Sample for CreateLogicalView", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_create_logical_view_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateLogicalView_sync", - "segments": [ - { - "end": 60, - "start": 27, - "type": "FULL" - }, - { - "end": 60, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 50, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 57, - "start": 51, - "type": "REQUEST_EXECUTION" - }, - { - "end": 61, - "start": 58, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_create_logical_view_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", - "shortName": "BigtableInstanceAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.create_materialized_view", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.CreateMaterializedView", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "CreateMaterializedView" - }, - "parameters": [ - { - "name": "request", - "type": 
"google.cloud.bigtable_admin_v2.types.CreateMaterializedViewRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "materialized_view", - "type": "google.cloud.bigtable_admin_v2.types.MaterializedView" - }, - { - "name": "materialized_view_id", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "create_materialized_view" - }, - "description": "Sample for CreateMaterializedView", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_create_materialized_view_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateMaterializedView_async", - "segments": [ - { - "end": 60, - "start": 27, - "type": "FULL" - }, - { - "end": 60, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 50, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 57, - "start": 51, - "type": "REQUEST_EXECUTION" - }, - { - "end": 61, - "start": 58, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_create_materialized_view_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", - "shortName": "BigtableInstanceAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.create_materialized_view", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.CreateMaterializedView", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "CreateMaterializedView" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.CreateMaterializedViewRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "materialized_view", - "type": "google.cloud.bigtable_admin_v2.types.MaterializedView" - }, - { - "name": "materialized_view_id", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.api_core.operation.Operation", - "shortName": "create_materialized_view" - }, - "description": "Sample for CreateMaterializedView", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_create_materialized_view_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateMaterializedView_sync", - "segments": [ - { - "end": 60, - "start": 27, - "type": "FULL" - }, - { - "end": 60, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 50, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 57, - "start": 51, - "type": "REQUEST_EXECUTION" - }, - { - "end": 61, - "start": 58, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_create_materialized_view_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": 
"google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", - "shortName": "BigtableInstanceAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.delete_app_profile", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteAppProfile", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "DeleteAppProfile" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.DeleteAppProfileRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "ignore_warnings", - "type": "bool" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "shortName": "delete_app_profile" - }, - "description": "Sample for DeleteAppProfile", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_app_profile_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteAppProfile_async", - "segments": [ - { - "end": 50, - "start": 27, - "type": "FULL" - }, - { - "end": 50, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 46, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "start": 47, - "type": "REQUEST_EXECUTION" - }, - { - "end": 51, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_app_profile_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", - "shortName": "BigtableInstanceAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.delete_app_profile", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteAppProfile", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "DeleteAppProfile" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.DeleteAppProfileRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "ignore_warnings", - "type": "bool" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "shortName": "delete_app_profile" - }, - "description": "Sample for DeleteAppProfile", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_app_profile_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteAppProfile_sync", - "segments": [ - { - "end": 50, - "start": 27, - "type": "FULL" - }, - { - "end": 50, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 46, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "start": 47, - "type": "REQUEST_EXECUTION" - }, - { - "end": 51, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_app_profile_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": 
"google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", - "shortName": "BigtableInstanceAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.delete_cluster", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteCluster", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "DeleteCluster" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.DeleteClusterRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "shortName": "delete_cluster" - }, - "description": "Sample for DeleteCluster", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_cluster_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteCluster_async", - "segments": [ - { - "end": 49, - "start": 27, - "type": "FULL" - }, - { - "end": 49, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 50, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_cluster_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", - "shortName": "BigtableInstanceAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.delete_cluster", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteCluster", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "DeleteCluster" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.DeleteClusterRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "shortName": "delete_cluster" - }, - "description": "Sample for DeleteCluster", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_cluster_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteCluster_sync", - "segments": [ - { - "end": 49, - "start": 27, - "type": "FULL" - }, - { - "end": 49, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 50, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_cluster_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", - "shortName": "BigtableInstanceAdminAsyncClient" - }, - "fullName": 
"google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.delete_instance", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteInstance", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "DeleteInstance" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.DeleteInstanceRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "shortName": "delete_instance" - }, - "description": "Sample for DeleteInstance", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_instance_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteInstance_async", - "segments": [ - { - "end": 49, - "start": 27, - "type": "FULL" - }, - { - "end": 49, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 50, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_instance_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", - "shortName": "BigtableInstanceAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.delete_instance", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteInstance", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "DeleteInstance" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.DeleteInstanceRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "shortName": "delete_instance" - }, - "description": "Sample for DeleteInstance", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_instance_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteInstance_sync", - "segments": [ - { - "end": 49, - "start": 27, - "type": "FULL" - }, - { - "end": 49, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 50, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_instance_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", - "shortName": "BigtableInstanceAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.delete_logical_view", - "method": { - "fullName": 
"google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteLogicalView", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "DeleteLogicalView" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.DeleteLogicalViewRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "shortName": "delete_logical_view" - }, - "description": "Sample for DeleteLogicalView", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_logical_view_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteLogicalView_async", - "segments": [ - { - "end": 49, - "start": 27, - "type": "FULL" - }, - { - "end": 49, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 50, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_logical_view_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", - "shortName": "BigtableInstanceAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.delete_logical_view", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteLogicalView", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "DeleteLogicalView" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.DeleteLogicalViewRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "shortName": "delete_logical_view" - }, - "description": "Sample for DeleteLogicalView", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_logical_view_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteLogicalView_sync", - "segments": [ - { - "end": 49, - "start": 27, - "type": "FULL" - }, - { - "end": 49, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 50, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_logical_view_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", - "shortName": "BigtableInstanceAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.delete_materialized_view", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteMaterializedView", - "service": { - 
"fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "DeleteMaterializedView" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.DeleteMaterializedViewRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "shortName": "delete_materialized_view" - }, - "description": "Sample for DeleteMaterializedView", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_materialized_view_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteMaterializedView_async", - "segments": [ - { - "end": 49, - "start": 27, - "type": "FULL" - }, - { - "end": 49, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 50, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_materialized_view_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", - "shortName": "BigtableInstanceAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.delete_materialized_view", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteMaterializedView", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "DeleteMaterializedView" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.DeleteMaterializedViewRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "shortName": "delete_materialized_view" - }, - "description": "Sample for DeleteMaterializedView", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_materialized_view_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteMaterializedView_sync", - "segments": [ - { - "end": 49, - "start": 27, - "type": "FULL" - }, - { - "end": 49, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 50, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_materialized_view_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", - "shortName": "BigtableInstanceAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.get_app_profile", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.GetAppProfile", - "service": { - "fullName": 
"google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "GetAppProfile" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.GetAppProfileRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.types.AppProfile", - "shortName": "get_app_profile" - }, - "description": "Sample for GetAppProfile", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_app_profile_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetAppProfile_async", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_app_profile_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", - "shortName": "BigtableInstanceAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.get_app_profile", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.GetAppProfile", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "GetAppProfile" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.GetAppProfileRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.types.AppProfile", - "shortName": "get_app_profile" - }, - "description": "Sample for GetAppProfile", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_app_profile_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetAppProfile_sync", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_app_profile_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", - "shortName": "BigtableInstanceAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.get_cluster", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.GetCluster", - "service": { - 
"fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "GetCluster" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.GetClusterRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.types.Cluster", - "shortName": "get_cluster" - }, - "description": "Sample for GetCluster", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_cluster_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetCluster_async", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_cluster_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", - "shortName": "BigtableInstanceAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.get_cluster", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.GetCluster", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "GetCluster" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.GetClusterRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.types.Cluster", - "shortName": "get_cluster" - }, - "description": "Sample for GetCluster", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_cluster_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetCluster_sync", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_cluster_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", - "shortName": "BigtableInstanceAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.get_iam_policy", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.GetIamPolicy", - "service": { - "fullName": 
"google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "GetIamPolicy" - }, - "parameters": [ - { - "name": "request", - "type": "google.iam.v1.iam_policy_pb2.GetIamPolicyRequest" - }, - { - "name": "resource", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.iam.v1.policy_pb2.Policy", - "shortName": "get_iam_policy" - }, - "description": "Sample for GetIamPolicy", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_iam_policy_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetIamPolicy_async", - "segments": [ - { - "end": 52, - "start": 27, - "type": "FULL" - }, - { - "end": 52, - "start": 27, - "type": "SHORT" - }, - { - "end": 41, - "start": 39, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 46, - "start": 42, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 49, - "start": 47, - "type": "REQUEST_EXECUTION" - }, - { - "end": 53, - "start": 50, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_iam_policy_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", - "shortName": "BigtableInstanceAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.get_iam_policy", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.GetIamPolicy", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "GetIamPolicy" - }, - "parameters": [ - { - "name": "request", - "type": "google.iam.v1.iam_policy_pb2.GetIamPolicyRequest" - }, - { - "name": "resource", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.iam.v1.policy_pb2.Policy", - "shortName": "get_iam_policy" - }, - "description": "Sample for GetIamPolicy", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_iam_policy_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetIamPolicy_sync", - "segments": [ - { - "end": 52, - "start": 27, - "type": "FULL" - }, - { - "end": 52, - "start": 27, - "type": "SHORT" - }, - { - "end": 41, - "start": 39, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 46, - "start": 42, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 49, - "start": 47, - "type": "REQUEST_EXECUTION" - }, - { - "end": 53, - "start": 50, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_iam_policy_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", - "shortName": "BigtableInstanceAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.get_instance", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.GetInstance", - "service": { - "fullName": 
"google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "GetInstance" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.GetInstanceRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.types.Instance", - "shortName": "get_instance" - }, - "description": "Sample for GetInstance", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_instance_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetInstance_async", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_instance_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", - "shortName": "BigtableInstanceAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.get_instance", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.GetInstance", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "GetInstance" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.GetInstanceRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.types.Instance", - "shortName": "get_instance" - }, - "description": "Sample for GetInstance", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_instance_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetInstance_sync", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_instance_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", - "shortName": "BigtableInstanceAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.get_logical_view", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.GetLogicalView", - "service": { - "fullName": 
"google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "GetLogicalView" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.GetLogicalViewRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.types.LogicalView", - "shortName": "get_logical_view" - }, - "description": "Sample for GetLogicalView", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_logical_view_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetLogicalView_async", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_logical_view_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", - "shortName": "BigtableInstanceAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.get_logical_view", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.GetLogicalView", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "GetLogicalView" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.GetLogicalViewRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.types.LogicalView", - "shortName": "get_logical_view" - }, - "description": "Sample for GetLogicalView", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_logical_view_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetLogicalView_sync", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_logical_view_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", - "shortName": "BigtableInstanceAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.get_materialized_view", - "method": { - "fullName": 
"google.bigtable.admin.v2.BigtableInstanceAdmin.GetMaterializedView", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "GetMaterializedView" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.GetMaterializedViewRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.types.MaterializedView", - "shortName": "get_materialized_view" - }, - "description": "Sample for GetMaterializedView", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_materialized_view_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetMaterializedView_async", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_materialized_view_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", - "shortName": "BigtableInstanceAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.get_materialized_view", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.GetMaterializedView", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "GetMaterializedView" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.GetMaterializedViewRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.types.MaterializedView", - "shortName": "get_materialized_view" - }, - "description": "Sample for GetMaterializedView", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_materialized_view_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetMaterializedView_sync", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_materialized_view_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", - "shortName": 
"BigtableInstanceAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.list_app_profiles", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListAppProfiles", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "ListAppProfiles" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.ListAppProfilesRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListAppProfilesAsyncPager", - "shortName": "list_app_profiles" - }, - "description": "Sample for ListAppProfiles", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_app_profiles_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListAppProfiles_async", - "segments": [ - { - "end": 52, - "start": 27, - "type": "FULL" - }, - { - "end": 52, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 53, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_app_profiles_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", - "shortName": "BigtableInstanceAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.list_app_profiles", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListAppProfiles", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "ListAppProfiles" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.ListAppProfilesRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListAppProfilesPager", - "shortName": "list_app_profiles" - }, - "description": "Sample for ListAppProfiles", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_app_profiles_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListAppProfiles_sync", - "segments": [ - { - "end": 52, - "start": 27, - "type": "FULL" - }, - { - "end": 52, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 53, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_app_profiles_sync.py" - }, - { - "canonical": 
true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", - "shortName": "BigtableInstanceAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.list_clusters", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListClusters", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "ListClusters" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.ListClustersRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.types.ListClustersResponse", - "shortName": "list_clusters" - }, - "description": "Sample for ListClusters", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_clusters_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListClusters_async", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_clusters_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", - "shortName": "BigtableInstanceAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.list_clusters", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListClusters", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "ListClusters" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.ListClustersRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.types.ListClustersResponse", - "shortName": "list_clusters" - }, - "description": "Sample for ListClusters", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_clusters_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListClusters_sync", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_clusters_sync.py" - }, - 
{ - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", - "shortName": "BigtableInstanceAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.list_hot_tablets", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListHotTablets", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "ListHotTablets" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.ListHotTabletsRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListHotTabletsAsyncPager", - "shortName": "list_hot_tablets" - }, - "description": "Sample for ListHotTablets", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_hot_tablets_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListHotTablets_async", - "segments": [ - { - "end": 52, - "start": 27, - "type": "FULL" - }, - { - "end": 52, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 53, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_hot_tablets_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", - "shortName": "BigtableInstanceAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.list_hot_tablets", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListHotTablets", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "ListHotTablets" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.ListHotTabletsRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListHotTabletsPager", - "shortName": "list_hot_tablets" - }, - "description": "Sample for ListHotTablets", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_hot_tablets_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListHotTablets_sync", - "segments": [ - { - "end": 52, - "start": 27, - "type": "FULL" - }, - { - "end": 52, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 53, - "start": 49, - 
"type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_hot_tablets_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", - "shortName": "BigtableInstanceAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.list_instances", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListInstances", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "ListInstances" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.ListInstancesRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.types.ListInstancesResponse", - "shortName": "list_instances" - }, - "description": "Sample for ListInstances", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_instances_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListInstances_async", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_instances_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", - "shortName": "BigtableInstanceAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.list_instances", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListInstances", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "ListInstances" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.ListInstancesRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.types.ListInstancesResponse", - "shortName": "list_instances" - }, - "description": "Sample for ListInstances", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_instances_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListInstances_sync", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" 
- }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_instances_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", - "shortName": "BigtableInstanceAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.list_logical_views", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListLogicalViews", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "ListLogicalViews" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.ListLogicalViewsRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListLogicalViewsAsyncPager", - "shortName": "list_logical_views" - }, - "description": "Sample for ListLogicalViews", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_logical_views_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListLogicalViews_async", - "segments": [ - { - "end": 52, - "start": 27, - "type": "FULL" - }, - { - "end": 52, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 53, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_logical_views_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", - "shortName": "BigtableInstanceAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.list_logical_views", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListLogicalViews", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "ListLogicalViews" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.ListLogicalViewsRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListLogicalViewsPager", - "shortName": "list_logical_views" - }, - "description": "Sample for ListLogicalViews", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_logical_views_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListLogicalViews_sync", - "segments": [ - { - "end": 52, - "start": 27, - "type": "FULL" - }, - { - "end": 52, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - 
"type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 53, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_logical_views_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", - "shortName": "BigtableInstanceAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.list_materialized_views", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListMaterializedViews", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "ListMaterializedViews" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.ListMaterializedViewsRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListMaterializedViewsAsyncPager", - "shortName": "list_materialized_views" - }, - "description": "Sample for ListMaterializedViews", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_materialized_views_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListMaterializedViews_async", - "segments": [ - { - "end": 52, - "start": 27, - "type": "FULL" - }, - { - "end": 52, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 53, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_materialized_views_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", - "shortName": "BigtableInstanceAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.list_materialized_views", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListMaterializedViews", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "ListMaterializedViews" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.ListMaterializedViewsRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListMaterializedViewsPager", - "shortName": "list_materialized_views" - }, - "description": "Sample for ListMaterializedViews", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_materialized_views_sync.py", - "language": "PYTHON", - "origin": 
"API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListMaterializedViews_sync", - "segments": [ - { - "end": 52, - "start": 27, - "type": "FULL" - }, - { - "end": 52, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 53, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_materialized_views_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", - "shortName": "BigtableInstanceAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.partial_update_cluster", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.PartialUpdateCluster", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "PartialUpdateCluster" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.PartialUpdateClusterRequest" - }, - { - "name": "cluster", - "type": "google.cloud.bigtable_admin_v2.types.Cluster" - }, - { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "partial_update_cluster" - }, - "description": "Sample for PartialUpdateCluster", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_cluster_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateCluster_async", - "segments": [ - { - "end": 54, - "start": 27, - "type": "FULL" - }, - { - "end": 54, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 44, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 51, - "start": 45, - "type": "REQUEST_EXECUTION" - }, - { - "end": 55, - "start": 52, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_cluster_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", - "shortName": "BigtableInstanceAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.partial_update_cluster", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.PartialUpdateCluster", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "PartialUpdateCluster" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.PartialUpdateClusterRequest" - }, - { - "name": "cluster", - "type": "google.cloud.bigtable_admin_v2.types.Cluster" - }, - { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": 
"timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.api_core.operation.Operation", - "shortName": "partial_update_cluster" - }, - "description": "Sample for PartialUpdateCluster", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_cluster_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateCluster_sync", - "segments": [ - { - "end": 54, - "start": 27, - "type": "FULL" - }, - { - "end": 54, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 44, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 51, - "start": 45, - "type": "REQUEST_EXECUTION" - }, - { - "end": 55, - "start": 52, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_cluster_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", - "shortName": "BigtableInstanceAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.partial_update_instance", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.PartialUpdateInstance", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "PartialUpdateInstance" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.PartialUpdateInstanceRequest" - }, - { - "name": "instance", - "type": "google.cloud.bigtable_admin_v2.types.Instance" - }, - { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "partial_update_instance" - }, - "description": "Sample for PartialUpdateInstance", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_instance_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateInstance_async", - "segments": [ - { - "end": 58, - "start": 27, - "type": "FULL" - }, - { - "end": 58, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 48, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 55, - "start": 49, - "type": "REQUEST_EXECUTION" - }, - { - "end": 59, - "start": 56, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_instance_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", - "shortName": "BigtableInstanceAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.partial_update_instance", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.PartialUpdateInstance", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": 
"PartialUpdateInstance" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.PartialUpdateInstanceRequest" - }, - { - "name": "instance", - "type": "google.cloud.bigtable_admin_v2.types.Instance" - }, - { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.api_core.operation.Operation", - "shortName": "partial_update_instance" - }, - "description": "Sample for PartialUpdateInstance", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_instance_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateInstance_sync", - "segments": [ - { - "end": 58, - "start": 27, - "type": "FULL" - }, - { - "end": 58, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 48, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 55, - "start": 49, - "type": "REQUEST_EXECUTION" - }, - { - "end": 59, - "start": 56, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_instance_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", - "shortName": "BigtableInstanceAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.set_iam_policy", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.SetIamPolicy", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "SetIamPolicy" - }, - "parameters": [ - { - "name": "request", - "type": "google.iam.v1.iam_policy_pb2.SetIamPolicyRequest" - }, - { - "name": "resource", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.iam.v1.policy_pb2.Policy", - "shortName": "set_iam_policy" - }, - "description": "Sample for SetIamPolicy", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_set_iam_policy_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_SetIamPolicy_async", - "segments": [ - { - "end": 52, - "start": 27, - "type": "FULL" - }, - { - "end": 52, - "start": 27, - "type": "SHORT" - }, - { - "end": 41, - "start": 39, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 46, - "start": 42, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 49, - "start": 47, - "type": "REQUEST_EXECUTION" - }, - { - "end": 53, - "start": 50, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_set_iam_policy_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", - "shortName": "BigtableInstanceAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.set_iam_policy", - "method": { - "fullName": 
"google.bigtable.admin.v2.BigtableInstanceAdmin.SetIamPolicy", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "SetIamPolicy" - }, - "parameters": [ - { - "name": "request", - "type": "google.iam.v1.iam_policy_pb2.SetIamPolicyRequest" - }, - { - "name": "resource", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.iam.v1.policy_pb2.Policy", - "shortName": "set_iam_policy" - }, - "description": "Sample for SetIamPolicy", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_set_iam_policy_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_SetIamPolicy_sync", - "segments": [ - { - "end": 52, - "start": 27, - "type": "FULL" - }, - { - "end": 52, - "start": 27, - "type": "SHORT" - }, - { - "end": 41, - "start": 39, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 46, - "start": 42, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 49, - "start": 47, - "type": "REQUEST_EXECUTION" - }, - { - "end": 53, - "start": 50, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_set_iam_policy_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", - "shortName": "BigtableInstanceAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.test_iam_permissions", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.TestIamPermissions", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "TestIamPermissions" - }, - "parameters": [ - { - "name": "request", - "type": "google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest" - }, - { - "name": "resource", - "type": "str" - }, - { - "name": "permissions", - "type": "MutableSequence[str]" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse", - "shortName": "test_iam_permissions" - }, - "description": "Sample for TestIamPermissions", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_test_iam_permissions_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_TestIamPermissions_async", - "segments": [ - { - "end": 53, - "start": 27, - "type": "FULL" - }, - { - "end": 53, - "start": 27, - "type": "SHORT" - }, - { - "end": 41, - "start": 39, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 47, - "start": 42, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 50, - "start": 48, - "type": "REQUEST_EXECUTION" - }, - { - "end": 54, - "start": 51, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_test_iam_permissions_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", - "shortName": "BigtableInstanceAdminClient" - }, - "fullName": 
"google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.test_iam_permissions", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.TestIamPermissions", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "TestIamPermissions" - }, - "parameters": [ - { - "name": "request", - "type": "google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest" - }, - { - "name": "resource", - "type": "str" - }, - { - "name": "permissions", - "type": "MutableSequence[str]" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse", - "shortName": "test_iam_permissions" - }, - "description": "Sample for TestIamPermissions", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_test_iam_permissions_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_TestIamPermissions_sync", - "segments": [ - { - "end": 53, - "start": 27, - "type": "FULL" - }, - { - "end": 53, - "start": 27, - "type": "SHORT" - }, - { - "end": 41, - "start": 39, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 47, - "start": 42, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 50, - "start": 48, - "type": "REQUEST_EXECUTION" - }, - { - "end": 54, - "start": 51, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_test_iam_permissions_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", - "shortName": "BigtableInstanceAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.update_app_profile", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateAppProfile", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "UpdateAppProfile" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.UpdateAppProfileRequest" - }, - { - "name": "app_profile", - "type": "google.cloud.bigtable_admin_v2.types.AppProfile" - }, - { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "update_app_profile" - }, - "description": "Sample for UpdateAppProfile", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_update_app_profile_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateAppProfile_async", - "segments": [ - { - "end": 58, - "start": 27, - "type": "FULL" - }, - { - "end": 58, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 48, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 55, - "start": 49, - "type": "REQUEST_EXECUTION" - }, - { - "end": 59, - "start": 56, - "type": "RESPONSE_HANDLING" - } - 
], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_update_app_profile_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", - "shortName": "BigtableInstanceAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.update_app_profile", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateAppProfile", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "UpdateAppProfile" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.UpdateAppProfileRequest" - }, - { - "name": "app_profile", - "type": "google.cloud.bigtable_admin_v2.types.AppProfile" - }, - { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.api_core.operation.Operation", - "shortName": "update_app_profile" - }, - "description": "Sample for UpdateAppProfile", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_update_app_profile_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateAppProfile_sync", - "segments": [ - { - "end": 58, - "start": 27, - "type": "FULL" - }, - { - "end": 58, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 48, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 55, - "start": 49, - "type": "REQUEST_EXECUTION" - }, - { - "end": 59, - "start": 56, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_update_app_profile_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", - "shortName": "BigtableInstanceAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.update_cluster", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateCluster", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "UpdateCluster" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.Cluster" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "update_cluster" - }, - "description": "Sample for UpdateCluster", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_update_cluster_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateCluster_async", - "segments": [ - { - "end": 54, - "start": 27, - "type": "FULL" - }, - { - "end": 54, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 44, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 51, - 
"start": 45, - "type": "REQUEST_EXECUTION" - }, - { - "end": 55, - "start": 52, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_update_cluster_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", - "shortName": "BigtableInstanceAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.update_cluster", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateCluster", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "UpdateCluster" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.Cluster" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.api_core.operation.Operation", - "shortName": "update_cluster" - }, - "description": "Sample for UpdateCluster", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_update_cluster_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateCluster_sync", - "segments": [ - { - "end": 54, - "start": 27, - "type": "FULL" - }, - { - "end": 54, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 44, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 51, - "start": 45, - "type": "REQUEST_EXECUTION" - }, - { - "end": 55, - "start": 52, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_update_cluster_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", - "shortName": "BigtableInstanceAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.update_instance", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateInstance", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "UpdateInstance" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.Instance" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.types.Instance", - "shortName": "update_instance" - }, - "description": "Sample for UpdateInstance", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_update_instance_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateInstance_async", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": 
"RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_update_instance_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", - "shortName": "BigtableInstanceAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.update_instance", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateInstance", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "UpdateInstance" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.Instance" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.types.Instance", - "shortName": "update_instance" - }, - "description": "Sample for UpdateInstance", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_update_instance_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateInstance_sync", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_update_instance_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", - "shortName": "BigtableInstanceAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.update_logical_view", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateLogicalView", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "UpdateLogicalView" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.UpdateLogicalViewRequest" - }, - { - "name": "logical_view", - "type": "google.cloud.bigtable_admin_v2.types.LogicalView" - }, - { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "update_logical_view" - }, - "description": "Sample for UpdateLogicalView", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_update_logical_view_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateLogicalView_async", - "segments": [ - { - "end": 58, - "start": 27, - "type": "FULL" - }, - { - "end": 58, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 48, - "start": 41, - "type": 
"REQUEST_INITIALIZATION" - }, - { - "end": 55, - "start": 49, - "type": "REQUEST_EXECUTION" - }, - { - "end": 59, - "start": 56, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_update_logical_view_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", - "shortName": "BigtableInstanceAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.update_logical_view", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateLogicalView", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "UpdateLogicalView" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.UpdateLogicalViewRequest" - }, - { - "name": "logical_view", - "type": "google.cloud.bigtable_admin_v2.types.LogicalView" - }, - { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.api_core.operation.Operation", - "shortName": "update_logical_view" - }, - "description": "Sample for UpdateLogicalView", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_update_logical_view_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateLogicalView_sync", - "segments": [ - { - "end": 58, - "start": 27, - "type": "FULL" - }, - { - "end": 58, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 48, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 55, - "start": 49, - "type": "REQUEST_EXECUTION" - }, - { - "end": 59, - "start": 56, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_update_logical_view_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", - "shortName": "BigtableInstanceAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.update_materialized_view", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateMaterializedView", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "UpdateMaterializedView" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.UpdateMaterializedViewRequest" - }, - { - "name": "materialized_view", - "type": "google.cloud.bigtable_admin_v2.types.MaterializedView" - }, - { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "update_materialized_view" - }, - "description": "Sample for UpdateMaterializedView", - "file": 
"bigtableadmin_v2_generated_bigtable_instance_admin_update_materialized_view_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateMaterializedView_async", - "segments": [ - { - "end": 58, - "start": 27, - "type": "FULL" - }, - { - "end": 58, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 48, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 55, - "start": 49, - "type": "REQUEST_EXECUTION" - }, - { - "end": 59, - "start": 56, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_update_materialized_view_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", - "shortName": "BigtableInstanceAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.update_materialized_view", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateMaterializedView", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "shortName": "BigtableInstanceAdmin" - }, - "shortName": "UpdateMaterializedView" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.UpdateMaterializedViewRequest" - }, - { - "name": "materialized_view", - "type": "google.cloud.bigtable_admin_v2.types.MaterializedView" - }, - { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.api_core.operation.Operation", - "shortName": "update_materialized_view" - }, - "description": "Sample for UpdateMaterializedView", - "file": "bigtableadmin_v2_generated_bigtable_instance_admin_update_materialized_view_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateMaterializedView_sync", - "segments": [ - { - "end": 58, - "start": 27, - "type": "FULL" - }, - { - "end": 58, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 48, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 55, - "start": 49, - "type": "REQUEST_EXECUTION" - }, - { - "end": 59, - "start": 56, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_instance_admin_update_materialized_view_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", - "shortName": "BaseBigtableTableAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.check_consistency", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "CheckConsistency" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.CheckConsistencyRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "consistency_token", - "type": "str" - }, - { - "name": "retry", - "type": 
"google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.types.CheckConsistencyResponse", - "shortName": "check_consistency" - }, - "description": "Sample for CheckConsistency", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_check_consistency_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CheckConsistency_async", - "segments": [ - { - "end": 52, - "start": 27, - "type": "FULL" - }, - { - "end": 52, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 46, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 49, - "start": 47, - "type": "REQUEST_EXECUTION" - }, - { - "end": 53, - "start": 50, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_check_consistency_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", - "shortName": "BaseBigtableTableAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.check_consistency", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "CheckConsistency" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.CheckConsistencyRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "consistency_token", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.types.CheckConsistencyResponse", - "shortName": "check_consistency" - }, - "description": "Sample for CheckConsistency", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_check_consistency_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CheckConsistency_sync", - "segments": [ - { - "end": 52, - "start": 27, - "type": "FULL" - }, - { - "end": 52, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 46, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 49, - "start": 47, - "type": "REQUEST_EXECUTION" - }, - { - "end": 53, - "start": 50, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_check_consistency_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", - "shortName": "BaseBigtableTableAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.copy_backup", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "CopyBackup" - }, - "parameters": [ - { - "name": "request", - "type": 
"google.cloud.bigtable_admin_v2.types.CopyBackupRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "backup_id", - "type": "str" - }, - { - "name": "source_backup", - "type": "str" - }, - { - "name": "expire_time", - "type": "google.protobuf.timestamp_pb2.Timestamp" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "copy_backup" - }, - "description": "Sample for CopyBackup", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_copy_backup_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CopyBackup_async", - "segments": [ - { - "end": 57, - "start": 27, - "type": "FULL" - }, - { - "end": 57, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 47, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 54, - "start": 48, - "type": "REQUEST_EXECUTION" - }, - { - "end": 58, - "start": 55, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_copy_backup_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", - "shortName": "BaseBigtableTableAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.copy_backup", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "CopyBackup" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.CopyBackupRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "backup_id", - "type": "str" - }, - { - "name": "source_backup", - "type": "str" - }, - { - "name": "expire_time", - "type": "google.protobuf.timestamp_pb2.Timestamp" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.api_core.operation.Operation", - "shortName": "copy_backup" - }, - "description": "Sample for CopyBackup", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_copy_backup_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CopyBackup_sync", - "segments": [ - { - "end": 57, - "start": 27, - "type": "FULL" - }, - { - "end": 57, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 47, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 54, - "start": 48, - "type": "REQUEST_EXECUTION" - }, - { - "end": 58, - "start": 55, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_copy_backup_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", - "shortName": "BaseBigtableTableAdminAsyncClient" - }, - "fullName": 
"google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.create_authorized_view", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CreateAuthorizedView", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "CreateAuthorizedView" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.CreateAuthorizedViewRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "authorized_view", - "type": "google.cloud.bigtable_admin_v2.types.AuthorizedView" - }, - { - "name": "authorized_view_id", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "create_authorized_view" - }, - "description": "Sample for CreateAuthorizedView", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_create_authorized_view_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CreateAuthorizedView_async", - "segments": [ - { - "end": 56, - "start": 27, - "type": "FULL" - }, - { - "end": 56, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 46, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 53, - "start": 47, - "type": "REQUEST_EXECUTION" - }, - { - "end": 57, - "start": 54, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_create_authorized_view_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", - "shortName": "BaseBigtableTableAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.create_authorized_view", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CreateAuthorizedView", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "CreateAuthorizedView" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.CreateAuthorizedViewRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "authorized_view", - "type": "google.cloud.bigtable_admin_v2.types.AuthorizedView" - }, - { - "name": "authorized_view_id", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.api_core.operation.Operation", - "shortName": "create_authorized_view" - }, - "description": "Sample for CreateAuthorizedView", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_create_authorized_view_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CreateAuthorizedView_sync", - "segments": [ - { - "end": 56, - "start": 27, - "type": "FULL" - }, - { - "end": 56, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 46, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 53, - "start": 47, - 
"type": "REQUEST_EXECUTION" - }, - { - "end": 57, - "start": 54, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_create_authorized_view_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", - "shortName": "BaseBigtableTableAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.create_backup", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "CreateBackup" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.CreateBackupRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "backup_id", - "type": "str" - }, - { - "name": "backup", - "type": "google.cloud.bigtable_admin_v2.types.Backup" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "create_backup" - }, - "description": "Sample for CreateBackup", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_create_backup_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CreateBackup_async", - "segments": [ - { - "end": 60, - "start": 27, - "type": "FULL" - }, - { - "end": 60, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 50, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 57, - "start": 51, - "type": "REQUEST_EXECUTION" - }, - { - "end": 61, - "start": 58, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_create_backup_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", - "shortName": "BaseBigtableTableAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.create_backup", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "CreateBackup" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.CreateBackupRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "backup_id", - "type": "str" - }, - { - "name": "backup", - "type": "google.cloud.bigtable_admin_v2.types.Backup" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.api_core.operation.Operation", - "shortName": "create_backup" - }, - "description": "Sample for CreateBackup", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_create_backup_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CreateBackup_sync", - "segments": [ - { - "end": 60, - "start": 27, - "type": "FULL" - }, - { 
- "end": 60, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 50, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 57, - "start": 51, - "type": "REQUEST_EXECUTION" - }, - { - "end": 61, - "start": 58, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_create_backup_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", - "shortName": "BaseBigtableTableAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient._create_schema_bundle", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CreateSchemaBundle", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "CreateSchemaBundle" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.CreateSchemaBundleRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "schema_bundle_id", - "type": "str" - }, - { - "name": "schema_bundle", - "type": "google.cloud.bigtable_admin_v2.types.SchemaBundle" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "_create_schema_bundle" - }, - "description": "Sample for CreateSchemaBundle", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_create_schema_bundle_async_internal.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CreateSchemaBundle_async_internal", - "segments": [ - { - "end": 60, - "start": 27, - "type": "FULL" - }, - { - "end": 60, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 50, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 57, - "start": 51, - "type": "REQUEST_EXECUTION" - }, - { - "end": 61, - "start": 58, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_create_schema_bundle_async_internal.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", - "shortName": "BaseBigtableTableAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient._create_schema_bundle", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CreateSchemaBundle", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "CreateSchemaBundle" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.CreateSchemaBundleRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "schema_bundle_id", - "type": "str" - }, - { - "name": "schema_bundle", - "type": "google.cloud.bigtable_admin_v2.types.SchemaBundle" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.api_core.operation.Operation", 
- "shortName": "_create_schema_bundle" - }, - "description": "Sample for CreateSchemaBundle", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_create_schema_bundle_sync_internal.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CreateSchemaBundle_sync_internal", - "segments": [ - { - "end": 60, - "start": 27, - "type": "FULL" - }, - { - "end": 60, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 50, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 57, - "start": 51, - "type": "REQUEST_EXECUTION" - }, - { - "end": 61, - "start": 58, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_create_schema_bundle_sync_internal.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", - "shortName": "BaseBigtableTableAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.create_table_from_snapshot", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "CreateTableFromSnapshot" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.CreateTableFromSnapshotRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "table_id", - "type": "str" - }, - { - "name": "source_snapshot", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "create_table_from_snapshot" - }, - "description": "Sample for CreateTableFromSnapshot", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_create_table_from_snapshot_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CreateTableFromSnapshot_async", - "segments": [ - { - "end": 57, - "start": 27, - "type": "FULL" - }, - { - "end": 57, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 47, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 54, - "start": 48, - "type": "REQUEST_EXECUTION" - }, - { - "end": 58, - "start": 55, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_create_table_from_snapshot_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", - "shortName": "BaseBigtableTableAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.create_table_from_snapshot", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "CreateTableFromSnapshot" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.CreateTableFromSnapshotRequest" - }, - { - "name": "parent", - "type": 
"str" - }, - { - "name": "table_id", - "type": "str" - }, - { - "name": "source_snapshot", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.api_core.operation.Operation", - "shortName": "create_table_from_snapshot" - }, - "description": "Sample for CreateTableFromSnapshot", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_create_table_from_snapshot_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CreateTableFromSnapshot_sync", - "segments": [ - { - "end": 57, - "start": 27, - "type": "FULL" - }, - { - "end": 57, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 47, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 54, - "start": 48, - "type": "REQUEST_EXECUTION" - }, - { - "end": 58, - "start": 55, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_create_table_from_snapshot_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", - "shortName": "BaseBigtableTableAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.create_table", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CreateTable", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "CreateTable" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.CreateTableRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "table_id", - "type": "str" - }, - { - "name": "table", - "type": "google.cloud.bigtable_admin_v2.types.Table" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.types.Table", - "shortName": "create_table" - }, - "description": "Sample for CreateTable", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_create_table_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CreateTable_async", - "segments": [ - { - "end": 52, - "start": 27, - "type": "FULL" - }, - { - "end": 52, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 46, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 49, - "start": 47, - "type": "REQUEST_EXECUTION" - }, - { - "end": 53, - "start": 50, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_create_table_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", - "shortName": "BaseBigtableTableAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.create_table", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CreateTable", - "service": { - "fullName": 
"google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "CreateTable" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.CreateTableRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "table_id", - "type": "str" - }, - { - "name": "table", - "type": "google.cloud.bigtable_admin_v2.types.Table" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.types.Table", - "shortName": "create_table" - }, - "description": "Sample for CreateTable", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_create_table_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CreateTable_sync", - "segments": [ - { - "end": 52, - "start": 27, - "type": "FULL" - }, - { - "end": 52, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 46, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 49, - "start": 47, - "type": "REQUEST_EXECUTION" - }, - { - "end": 53, - "start": 50, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_create_table_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", - "shortName": "BaseBigtableTableAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.delete_authorized_view", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.DeleteAuthorizedView", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "DeleteAuthorizedView" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.DeleteAuthorizedViewRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "shortName": "delete_authorized_view" - }, - "description": "Sample for DeleteAuthorizedView", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_delete_authorized_view_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DeleteAuthorizedView_async", - "segments": [ - { - "end": 49, - "start": 27, - "type": "FULL" - }, - { - "end": 49, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 50, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_delete_authorized_view_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", - "shortName": "BaseBigtableTableAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.delete_authorized_view", - "method": { - "fullName": 
"google.bigtable.admin.v2.BigtableTableAdmin.DeleteAuthorizedView", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "DeleteAuthorizedView" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.DeleteAuthorizedViewRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "shortName": "delete_authorized_view" - }, - "description": "Sample for DeleteAuthorizedView", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_delete_authorized_view_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DeleteAuthorizedView_sync", - "segments": [ - { - "end": 49, - "start": 27, - "type": "FULL" - }, - { - "end": 49, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 50, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_delete_authorized_view_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", - "shortName": "BaseBigtableTableAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.delete_backup", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "DeleteBackup" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.DeleteBackupRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "shortName": "delete_backup" - }, - "description": "Sample for DeleteBackup", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_delete_backup_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DeleteBackup_async", - "segments": [ - { - "end": 49, - "start": 27, - "type": "FULL" - }, - { - "end": 49, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 50, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_delete_backup_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", - "shortName": "BaseBigtableTableAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.delete_backup", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": 
"BigtableTableAdmin" - }, - "shortName": "DeleteBackup" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.DeleteBackupRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "shortName": "delete_backup" - }, - "description": "Sample for DeleteBackup", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_delete_backup_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DeleteBackup_sync", - "segments": [ - { - "end": 49, - "start": 27, - "type": "FULL" - }, - { - "end": 49, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 50, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_delete_backup_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", - "shortName": "BaseBigtableTableAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient._delete_schema_bundle", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.DeleteSchemaBundle", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "DeleteSchemaBundle" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.DeleteSchemaBundleRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "shortName": "_delete_schema_bundle" - }, - "description": "Sample for DeleteSchemaBundle", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_delete_schema_bundle_async_internal.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSchemaBundle_async_internal", - "segments": [ - { - "end": 49, - "start": 27, - "type": "FULL" - }, - { - "end": 49, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 50, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_delete_schema_bundle_async_internal.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", - "shortName": "BaseBigtableTableAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient._delete_schema_bundle", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.DeleteSchemaBundle", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "DeleteSchemaBundle" - }, - "parameters": [ - { - "name": "request", - "type": 
"google.cloud.bigtable_admin_v2.types.DeleteSchemaBundleRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "shortName": "_delete_schema_bundle" - }, - "description": "Sample for DeleteSchemaBundle", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_delete_schema_bundle_sync_internal.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSchemaBundle_sync_internal", - "segments": [ - { - "end": 49, - "start": 27, - "type": "FULL" - }, - { - "end": 49, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 50, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_delete_schema_bundle_sync_internal.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", - "shortName": "BaseBigtableTableAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient._delete_snapshot", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "DeleteSnapshot" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.DeleteSnapshotRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "shortName": "_delete_snapshot" - }, - "description": "Sample for DeleteSnapshot", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_delete_snapshot_async_internal.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSnapshot_async_internal", - "segments": [ - { - "end": 49, - "start": 27, - "type": "FULL" - }, - { - "end": 49, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 50, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_delete_snapshot_async_internal.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", - "shortName": "BaseBigtableTableAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient._delete_snapshot", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "DeleteSnapshot" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.DeleteSnapshotRequest" - }, - { - "name": "name", - "type": "str" - 
}, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "shortName": "_delete_snapshot" - }, - "description": "Sample for DeleteSnapshot", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_delete_snapshot_sync_internal.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSnapshot_sync_internal", - "segments": [ - { - "end": 49, - "start": 27, - "type": "FULL" - }, - { - "end": 49, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 50, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_delete_snapshot_sync_internal.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", - "shortName": "BaseBigtableTableAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.delete_table", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "DeleteTable" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.DeleteTableRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "shortName": "delete_table" - }, - "description": "Sample for DeleteTable", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_delete_table_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DeleteTable_async", - "segments": [ - { - "end": 49, - "start": 27, - "type": "FULL" - }, - { - "end": 49, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 50, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_delete_table_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", - "shortName": "BaseBigtableTableAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.delete_table", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "DeleteTable" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.DeleteTableRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - 
], - "shortName": "delete_table" - }, - "description": "Sample for DeleteTable", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_delete_table_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DeleteTable_sync", - "segments": [ - { - "end": 49, - "start": 27, - "type": "FULL" - }, - { - "end": 49, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 50, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_delete_table_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", - "shortName": "BaseBigtableTableAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.drop_row_range", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "DropRowRange" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.DropRowRangeRequest" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "shortName": "drop_row_range" - }, - "description": "Sample for DropRowRange", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_drop_row_range_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DropRowRange_async", - "segments": [ - { - "end": 50, - "start": 27, - "type": "FULL" - }, - { - "end": 50, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 46, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "start": 47, - "type": "REQUEST_EXECUTION" - }, - { - "end": 51, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_drop_row_range_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", - "shortName": "BaseBigtableTableAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.drop_row_range", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "DropRowRange" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.DropRowRangeRequest" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "shortName": "drop_row_range" - }, - "description": "Sample for DropRowRange", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_drop_row_range_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DropRowRange_sync", - 
"segments": [ - { - "end": 50, - "start": 27, - "type": "FULL" - }, - { - "end": 50, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 46, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "start": 47, - "type": "REQUEST_EXECUTION" - }, - { - "end": 51, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_drop_row_range_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", - "shortName": "BaseBigtableTableAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.generate_consistency_token", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "GenerateConsistencyToken" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenResponse", - "shortName": "generate_consistency_token" - }, - "description": "Sample for GenerateConsistencyToken", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_generate_consistency_token_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GenerateConsistencyToken_async", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_generate_consistency_token_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", - "shortName": "BaseBigtableTableAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.generate_consistency_token", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "GenerateConsistencyToken" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenResponse", - "shortName": "generate_consistency_token" - }, - "description": "Sample for GenerateConsistencyToken", - "file": 
"bigtableadmin_v2_generated_bigtable_table_admin_generate_consistency_token_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GenerateConsistencyToken_sync", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_generate_consistency_token_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", - "shortName": "BaseBigtableTableAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.get_authorized_view", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetAuthorizedView", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "GetAuthorizedView" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.GetAuthorizedViewRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.types.AuthorizedView", - "shortName": "get_authorized_view" - }, - "description": "Sample for GetAuthorizedView", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_get_authorized_view_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GetAuthorizedView_async", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_authorized_view_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", - "shortName": "BaseBigtableTableAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.get_authorized_view", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetAuthorizedView", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "GetAuthorizedView" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.GetAuthorizedViewRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.types.AuthorizedView", - 
"shortName": "get_authorized_view" - }, - "description": "Sample for GetAuthorizedView", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_get_authorized_view_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GetAuthorizedView_sync", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_authorized_view_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", - "shortName": "BaseBigtableTableAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.get_backup", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetBackup", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "GetBackup" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.GetBackupRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.types.Backup", - "shortName": "get_backup" - }, - "description": "Sample for GetBackup", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_get_backup_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GetBackup_async", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_backup_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", - "shortName": "BaseBigtableTableAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.get_backup", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetBackup", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "GetBackup" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.GetBackupRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.types.Backup", - "shortName": "get_backup" - }, - "description": "Sample 
for GetBackup", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_get_backup_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GetBackup_sync", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_backup_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", - "shortName": "BaseBigtableTableAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.get_iam_policy", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetIamPolicy", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "GetIamPolicy" - }, - "parameters": [ - { - "name": "request", - "type": "google.iam.v1.iam_policy_pb2.GetIamPolicyRequest" - }, - { - "name": "resource", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.iam.v1.policy_pb2.Policy", - "shortName": "get_iam_policy" - }, - "description": "Sample for GetIamPolicy", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_get_iam_policy_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GetIamPolicy_async", - "segments": [ - { - "end": 52, - "start": 27, - "type": "FULL" - }, - { - "end": 52, - "start": 27, - "type": "SHORT" - }, - { - "end": 41, - "start": 39, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 46, - "start": 42, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 49, - "start": 47, - "type": "REQUEST_EXECUTION" - }, - { - "end": 53, - "start": 50, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_iam_policy_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", - "shortName": "BaseBigtableTableAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.get_iam_policy", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetIamPolicy", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "GetIamPolicy" - }, - "parameters": [ - { - "name": "request", - "type": "google.iam.v1.iam_policy_pb2.GetIamPolicyRequest" - }, - { - "name": "resource", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.iam.v1.policy_pb2.Policy", - "shortName": "get_iam_policy" - }, - "description": "Sample for GetIamPolicy", - "file": 
"bigtableadmin_v2_generated_bigtable_table_admin_get_iam_policy_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GetIamPolicy_sync", - "segments": [ - { - "end": 52, - "start": 27, - "type": "FULL" - }, - { - "end": 52, - "start": 27, - "type": "SHORT" - }, - { - "end": 41, - "start": 39, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 46, - "start": 42, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 49, - "start": 47, - "type": "REQUEST_EXECUTION" - }, - { - "end": 53, - "start": 50, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_iam_policy_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", - "shortName": "BaseBigtableTableAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient._get_schema_bundle", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetSchemaBundle", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "GetSchemaBundle" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.GetSchemaBundleRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.types.SchemaBundle", - "shortName": "_get_schema_bundle" - }, - "description": "Sample for GetSchemaBundle", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_get_schema_bundle_async_internal.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GetSchemaBundle_async_internal", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_schema_bundle_async_internal.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", - "shortName": "BaseBigtableTableAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient._get_schema_bundle", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetSchemaBundle", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "GetSchemaBundle" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.GetSchemaBundleRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.types.SchemaBundle", - "shortName": "_get_schema_bundle" - }, - 
"description": "Sample for GetSchemaBundle", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_get_schema_bundle_sync_internal.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GetSchemaBundle_sync_internal", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_schema_bundle_sync_internal.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", - "shortName": "BaseBigtableTableAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient._get_snapshot", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "GetSnapshot" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.GetSnapshotRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.types.Snapshot", - "shortName": "_get_snapshot" - }, - "description": "Sample for GetSnapshot", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_get_snapshot_async_internal.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GetSnapshot_async_internal", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_snapshot_async_internal.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", - "shortName": "BaseBigtableTableAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient._get_snapshot", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "GetSnapshot" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.GetSnapshotRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.types.Snapshot", - "shortName": 
"_get_snapshot" - }, - "description": "Sample for GetSnapshot", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_get_snapshot_sync_internal.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GetSnapshot_sync_internal", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_snapshot_sync_internal.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", - "shortName": "BaseBigtableTableAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.get_table", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetTable", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "GetTable" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.GetTableRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.types.Table", - "shortName": "get_table" - }, - "description": "Sample for GetTable", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_get_table_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GetTable_async", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_table_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", - "shortName": "BaseBigtableTableAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.get_table", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetTable", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "GetTable" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.GetTableRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.types.Table", - "shortName": "get_table" - }, - "description": "Sample for GetTable", - "file": 
"bigtableadmin_v2_generated_bigtable_table_admin_get_table_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GetTable_sync", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_table_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", - "shortName": "BaseBigtableTableAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.list_authorized_views", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "ListAuthorizedViews" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.ListAuthorizedViewsRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListAuthorizedViewsAsyncPager", - "shortName": "list_authorized_views" - }, - "description": "Sample for ListAuthorizedViews", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_list_authorized_views_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ListAuthorizedViews_async", - "segments": [ - { - "end": 52, - "start": 27, - "type": "FULL" - }, - { - "end": 52, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 53, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_list_authorized_views_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", - "shortName": "BaseBigtableTableAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.list_authorized_views", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "ListAuthorizedViews" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.ListAuthorizedViewsRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": 
"google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListAuthorizedViewsPager", - "shortName": "list_authorized_views" - }, - "description": "Sample for ListAuthorizedViews", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_list_authorized_views_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ListAuthorizedViews_sync", - "segments": [ - { - "end": 52, - "start": 27, - "type": "FULL" - }, - { - "end": 52, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 53, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_list_authorized_views_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", - "shortName": "BaseBigtableTableAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.list_backups", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ListBackups", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "ListBackups" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.ListBackupsRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListBackupsAsyncPager", - "shortName": "list_backups" - }, - "description": "Sample for ListBackups", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_list_backups_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ListBackups_async", - "segments": [ - { - "end": 52, - "start": 27, - "type": "FULL" - }, - { - "end": 52, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 53, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_list_backups_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", - "shortName": "BaseBigtableTableAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.list_backups", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ListBackups", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "ListBackups" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.ListBackupsRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": 
"metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListBackupsPager", - "shortName": "list_backups" - }, - "description": "Sample for ListBackups", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_list_backups_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ListBackups_sync", - "segments": [ - { - "end": 52, - "start": 27, - "type": "FULL" - }, - { - "end": 52, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 53, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_list_backups_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", - "shortName": "BaseBigtableTableAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient._list_schema_bundles", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ListSchemaBundles", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "ListSchemaBundles" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.ListSchemaBundlesRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListSchemaBundlesAsyncPager", - "shortName": "_list_schema_bundles" - }, - "description": "Sample for ListSchemaBundles", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_list_schema_bundles_async_internal.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ListSchemaBundles_async_internal", - "segments": [ - { - "end": 52, - "start": 27, - "type": "FULL" - }, - { - "end": 52, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 53, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_list_schema_bundles_async_internal.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", - "shortName": "BaseBigtableTableAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient._list_schema_bundles", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ListSchemaBundles", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "ListSchemaBundles" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.ListSchemaBundlesRequest" - }, - { - "name": "parent", - 
"type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListSchemaBundlesPager", - "shortName": "_list_schema_bundles" - }, - "description": "Sample for ListSchemaBundles", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_list_schema_bundles_sync_internal.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ListSchemaBundles_sync_internal", - "segments": [ - { - "end": 52, - "start": 27, - "type": "FULL" - }, - { - "end": 52, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 53, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_list_schema_bundles_sync_internal.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", - "shortName": "BaseBigtableTableAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient._list_snapshots", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "ListSnapshots" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListSnapshotsAsyncPager", - "shortName": "_list_snapshots" - }, - "description": "Sample for ListSnapshots", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_list_snapshots_async_internal.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ListSnapshots_async_internal", - "segments": [ - { - "end": 52, - "start": 27, - "type": "FULL" - }, - { - "end": 52, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 53, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_list_snapshots_async_internal.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", - "shortName": "BaseBigtableTableAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient._list_snapshots", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": 
"ListSnapshots" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListSnapshotsPager", - "shortName": "_list_snapshots" - }, - "description": "Sample for ListSnapshots", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_list_snapshots_sync_internal.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ListSnapshots_sync_internal", - "segments": [ - { - "end": 52, - "start": 27, - "type": "FULL" - }, - { - "end": 52, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 53, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_list_snapshots_sync_internal.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", - "shortName": "BaseBigtableTableAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.list_tables", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ListTables", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "ListTables" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.ListTablesRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListTablesAsyncPager", - "shortName": "list_tables" - }, - "description": "Sample for ListTables", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_list_tables_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ListTables_async", - "segments": [ - { - "end": 52, - "start": 27, - "type": "FULL" - }, - { - "end": 52, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 53, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_list_tables_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", - "shortName": "BaseBigtableTableAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.list_tables", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ListTables", - "service": { - "fullName": 
"google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "ListTables" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.ListTablesRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListTablesPager", - "shortName": "list_tables" - }, - "description": "Sample for ListTables", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_list_tables_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ListTables_sync", - "segments": [ - { - "end": 52, - "start": 27, - "type": "FULL" - }, - { - "end": 52, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 53, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_list_tables_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", - "shortName": "BaseBigtableTableAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient._modify_column_families", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "ModifyColumnFamilies" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "modifications", - "type": "MutableSequence[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest.Modification]" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.types.Table", - "shortName": "_modify_column_families" - }, - "description": "Sample for ModifyColumnFamilies", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_modify_column_families_async_internal.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ModifyColumnFamilies_async_internal", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_modify_column_families_async_internal.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", - "shortName": 
"BaseBigtableTableAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient._modify_column_families", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "ModifyColumnFamilies" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "modifications", - "type": "MutableSequence[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest.Modification]" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.types.Table", - "shortName": "_modify_column_families" - }, - "description": "Sample for ModifyColumnFamilies", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_modify_column_families_sync_internal.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ModifyColumnFamilies_sync_internal", - "segments": [ - { - "end": 51, - "start": 27, - "type": "FULL" - }, - { - "end": 51, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 48, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 52, - "start": 49, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_modify_column_families_sync_internal.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", - "shortName": "BaseBigtableTableAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient._restore_table", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "RestoreTable" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.RestoreTableRequest" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "_restore_table" - }, - "description": "Sample for RestoreTable", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_restore_table_async_internal.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_RestoreTable_async_internal", - "segments": [ - { - "end": 57, - "start": 27, - "type": "FULL" - }, - { - "end": 57, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 47, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 54, - "start": 48, - "type": "REQUEST_EXECUTION" - }, - { - "end": 58, - "start": 55, - "type": "RESPONSE_HANDLING" - } - ], - "title": 
"bigtableadmin_v2_generated_bigtable_table_admin_restore_table_async_internal.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", - "shortName": "BaseBigtableTableAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient._restore_table", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "RestoreTable" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.RestoreTableRequest" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.api_core.operation.Operation", - "shortName": "_restore_table" - }, - "description": "Sample for RestoreTable", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_restore_table_sync_internal.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_RestoreTable_sync_internal", - "segments": [ - { - "end": 57, - "start": 27, - "type": "FULL" - }, - { - "end": 57, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 47, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 54, - "start": 48, - "type": "REQUEST_EXECUTION" - }, - { - "end": 58, - "start": 55, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_restore_table_sync_internal.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", - "shortName": "BaseBigtableTableAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.set_iam_policy", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.SetIamPolicy", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "SetIamPolicy" - }, - "parameters": [ - { - "name": "request", - "type": "google.iam.v1.iam_policy_pb2.SetIamPolicyRequest" - }, - { - "name": "resource", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.iam.v1.policy_pb2.Policy", - "shortName": "set_iam_policy" - }, - "description": "Sample for SetIamPolicy", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_set_iam_policy_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_SetIamPolicy_async", - "segments": [ - { - "end": 52, - "start": 27, - "type": "FULL" - }, - { - "end": 52, - "start": 27, - "type": "SHORT" - }, - { - "end": 41, - "start": 39, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 46, - "start": 42, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 49, - "start": 47, - "type": "REQUEST_EXECUTION" - }, - { - "end": 53, - "start": 50, - "type": "RESPONSE_HANDLING" - } - ], - "title": 
"bigtableadmin_v2_generated_bigtable_table_admin_set_iam_policy_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", - "shortName": "BaseBigtableTableAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.set_iam_policy", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.SetIamPolicy", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "SetIamPolicy" - }, - "parameters": [ - { - "name": "request", - "type": "google.iam.v1.iam_policy_pb2.SetIamPolicyRequest" - }, - { - "name": "resource", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.iam.v1.policy_pb2.Policy", - "shortName": "set_iam_policy" - }, - "description": "Sample for SetIamPolicy", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_set_iam_policy_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_SetIamPolicy_sync", - "segments": [ - { - "end": 52, - "start": 27, - "type": "FULL" - }, - { - "end": 52, - "start": 27, - "type": "SHORT" - }, - { - "end": 41, - "start": 39, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 46, - "start": 42, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 49, - "start": 47, - "type": "REQUEST_EXECUTION" - }, - { - "end": 53, - "start": 50, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_set_iam_policy_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", - "shortName": "BaseBigtableTableAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient._snapshot_table", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "SnapshotTable" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.SnapshotTableRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "cluster", - "type": "str" - }, - { - "name": "snapshot_id", - "type": "str" - }, - { - "name": "description", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "_snapshot_table" - }, - "description": "Sample for SnapshotTable", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_snapshot_table_async_internal.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_SnapshotTable_async_internal", - "segments": [ - { - "end": 57, - "start": 27, - "type": "FULL" - }, - { - "end": 57, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 47, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 54, - "start": 
48, - "type": "REQUEST_EXECUTION" - }, - { - "end": 58, - "start": 55, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_snapshot_table_async_internal.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", - "shortName": "BaseBigtableTableAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient._snapshot_table", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "SnapshotTable" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.SnapshotTableRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "cluster", - "type": "str" - }, - { - "name": "snapshot_id", - "type": "str" - }, - { - "name": "description", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.api_core.operation.Operation", - "shortName": "_snapshot_table" - }, - "description": "Sample for SnapshotTable", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_snapshot_table_sync_internal.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_SnapshotTable_sync_internal", - "segments": [ - { - "end": 57, - "start": 27, - "type": "FULL" - }, - { - "end": 57, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 47, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 54, - "start": 48, - "type": "REQUEST_EXECUTION" - }, - { - "end": 58, - "start": 55, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_snapshot_table_sync_internal.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", - "shortName": "BaseBigtableTableAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.test_iam_permissions", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.TestIamPermissions", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "TestIamPermissions" - }, - "parameters": [ - { - "name": "request", - "type": "google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest" - }, - { - "name": "resource", - "type": "str" - }, - { - "name": "permissions", - "type": "MutableSequence[str]" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse", - "shortName": "test_iam_permissions" - }, - "description": "Sample for TestIamPermissions", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_test_iam_permissions_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_TestIamPermissions_async", - "segments": [ - { - "end": 
53, - "start": 27, - "type": "FULL" - }, - { - "end": 53, - "start": 27, - "type": "SHORT" - }, - { - "end": 41, - "start": 39, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 47, - "start": 42, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 50, - "start": 48, - "type": "REQUEST_EXECUTION" - }, - { - "end": 54, - "start": 51, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_test_iam_permissions_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", - "shortName": "BaseBigtableTableAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.test_iam_permissions", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.TestIamPermissions", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "TestIamPermissions" - }, - "parameters": [ - { - "name": "request", - "type": "google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest" - }, - { - "name": "resource", - "type": "str" - }, - { - "name": "permissions", - "type": "MutableSequence[str]" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse", - "shortName": "test_iam_permissions" - }, - "description": "Sample for TestIamPermissions", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_test_iam_permissions_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_TestIamPermissions_sync", - "segments": [ - { - "end": 53, - "start": 27, - "type": "FULL" - }, - { - "end": 53, - "start": 27, - "type": "SHORT" - }, - { - "end": 41, - "start": 39, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 47, - "start": 42, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 50, - "start": 48, - "type": "REQUEST_EXECUTION" - }, - { - "end": 54, - "start": 51, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_test_iam_permissions_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", - "shortName": "BaseBigtableTableAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.undelete_table", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "UndeleteTable" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.UndeleteTableRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "undelete_table" - }, - "description": "Sample for UndeleteTable", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_undelete_table_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - 
"regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_UndeleteTable_async", - "segments": [ - { - "end": 55, - "start": 27, - "type": "FULL" - }, - { - "end": 55, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 52, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 56, - "start": 53, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_undelete_table_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", - "shortName": "BaseBigtableTableAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.undelete_table", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "UndeleteTable" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.UndeleteTableRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.api_core.operation.Operation", - "shortName": "undelete_table" - }, - "description": "Sample for UndeleteTable", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_undelete_table_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_UndeleteTable_sync", - "segments": [ - { - "end": 55, - "start": 27, - "type": "FULL" - }, - { - "end": 55, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 52, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 56, - "start": 53, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_undelete_table_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", - "shortName": "BaseBigtableTableAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.update_authorized_view", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.UpdateAuthorizedView", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "UpdateAuthorizedView" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.UpdateAuthorizedViewRequest" - }, - { - "name": "authorized_view", - "type": "google.cloud.bigtable_admin_v2.types.AuthorizedView" - }, - { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "update_authorized_view" - }, - "description": "Sample for 
UpdateAuthorizedView", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_update_authorized_view_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_UpdateAuthorizedView_async", - "segments": [ - { - "end": 54, - "start": 27, - "type": "FULL" - }, - { - "end": 54, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 44, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 51, - "start": 45, - "type": "REQUEST_EXECUTION" - }, - { - "end": 55, - "start": 52, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_update_authorized_view_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", - "shortName": "BaseBigtableTableAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.update_authorized_view", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.UpdateAuthorizedView", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "UpdateAuthorizedView" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.UpdateAuthorizedViewRequest" - }, - { - "name": "authorized_view", - "type": "google.cloud.bigtable_admin_v2.types.AuthorizedView" - }, - { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.api_core.operation.Operation", - "shortName": "update_authorized_view" - }, - "description": "Sample for UpdateAuthorizedView", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_update_authorized_view_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_UpdateAuthorizedView_sync", - "segments": [ - { - "end": 54, - "start": 27, - "type": "FULL" - }, - { - "end": 54, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 44, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 51, - "start": 45, - "type": "REQUEST_EXECUTION" - }, - { - "end": 55, - "start": 52, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_update_authorized_view_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", - "shortName": "BaseBigtableTableAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.update_backup", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "UpdateBackup" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.UpdateBackupRequest" - }, - { - "name": "backup", - "type": "google.cloud.bigtable_admin_v2.types.Backup" - }, - { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" - }, 
- { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.types.Backup", - "shortName": "update_backup" - }, - "description": "Sample for UpdateBackup", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_update_backup_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_UpdateBackup_async", - "segments": [ - { - "end": 54, - "start": 27, - "type": "FULL" - }, - { - "end": 54, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 48, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 51, - "start": 49, - "type": "REQUEST_EXECUTION" - }, - { - "end": 55, - "start": 52, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_update_backup_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", - "shortName": "BaseBigtableTableAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.update_backup", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "UpdateBackup" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.UpdateBackupRequest" - }, - { - "name": "backup", - "type": "google.cloud.bigtable_admin_v2.types.Backup" - }, - { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.cloud.bigtable_admin_v2.types.Backup", - "shortName": "update_backup" - }, - "description": "Sample for UpdateBackup", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_update_backup_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_UpdateBackup_sync", - "segments": [ - { - "end": 54, - "start": 27, - "type": "FULL" - }, - { - "end": 54, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 48, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 51, - "start": 49, - "type": "REQUEST_EXECUTION" - }, - { - "end": 55, - "start": 52, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_update_backup_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", - "shortName": "BaseBigtableTableAdminAsyncClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient._update_schema_bundle", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.UpdateSchemaBundle", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "UpdateSchemaBundle" - }, - "parameters": [ - { - "name": "request", - "type": 
"google.cloud.bigtable_admin_v2.types.UpdateSchemaBundleRequest" - }, - { - "name": "schema_bundle", - "type": "google.cloud.bigtable_admin_v2.types.SchemaBundle" - }, - { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "_update_schema_bundle" - }, - "description": "Sample for UpdateSchemaBundle", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_update_schema_bundle_async_internal.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_UpdateSchemaBundle_async_internal", - "segments": [ - { - "end": 58, - "start": 27, - "type": "FULL" - }, - { - "end": 58, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 48, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 55, - "start": 49, - "type": "REQUEST_EXECUTION" - }, - { - "end": 59, - "start": 56, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_update_schema_bundle_async_internal.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", - "shortName": "BaseBigtableTableAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient._update_schema_bundle", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.UpdateSchemaBundle", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "UpdateSchemaBundle" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.UpdateSchemaBundleRequest" - }, - { - "name": "schema_bundle", - "type": "google.cloud.bigtable_admin_v2.types.SchemaBundle" - }, - { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.api_core.operation.Operation", - "shortName": "_update_schema_bundle" - }, - "description": "Sample for UpdateSchemaBundle", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_update_schema_bundle_sync_internal.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_UpdateSchemaBundle_sync_internal", - "segments": [ - { - "end": 58, - "start": 27, - "type": "FULL" - }, - { - "end": 58, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 48, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 55, - "start": 49, - "type": "REQUEST_EXECUTION" - }, - { - "end": 59, - "start": 56, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_update_schema_bundle_sync_internal.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", - "shortName": "BaseBigtableTableAdminAsyncClient" - }, - 
"fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.update_table", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.UpdateTable", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "UpdateTable" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.UpdateTableRequest" - }, - { - "name": "table", - "type": "google.cloud.bigtable_admin_v2.types.Table" - }, - { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "update_table" - }, - "description": "Sample for UpdateTable", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_update_table_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_UpdateTable_async", - "segments": [ - { - "end": 54, - "start": 27, - "type": "FULL" - }, - { - "end": 54, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 44, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 51, - "start": 45, - "type": "REQUEST_EXECUTION" - }, - { - "end": 55, - "start": 52, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_update_table_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", - "shortName": "BaseBigtableTableAdminClient" - }, - "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.update_table", - "method": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.UpdateTable", - "service": { - "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", - "shortName": "BigtableTableAdmin" - }, - "shortName": "UpdateTable" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.bigtable_admin_v2.types.UpdateTableRequest" - }, - { - "name": "table", - "type": "google.cloud.bigtable_admin_v2.types.Table" - }, - { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, Union[str, bytes]]]" - } - ], - "resultType": "google.api_core.operation.Operation", - "shortName": "update_table" - }, - "description": "Sample for UpdateTable", - "file": "bigtableadmin_v2_generated_bigtable_table_admin_update_table_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_UpdateTable_sync", - "segments": [ - { - "end": 54, - "start": 27, - "type": "FULL" - }, - { - "end": 54, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 44, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 51, - "start": 45, - "type": "REQUEST_EXECUTION" - }, - { - "end": 55, - "start": 52, - "type": "RESPONSE_HANDLING" - } - ], - "title": "bigtableadmin_v2_generated_bigtable_table_admin_update_table_sync.py" - } - ] -} diff --git 
a/tests/unit/gapic/bigtable_admin_v2/__init__.py b/tests/unit/gapic/bigtable_admin_v2/__init__.py deleted file mode 100644 index cbf94b283..000000000 --- a/tests/unit/gapic/bigtable_admin_v2/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py deleted file mode 100644 index 2ad52bf52..000000000 --- a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ /dev/null @@ -1,26520 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import os - -# try/except added for compatibility with python < 3.8 -try: - from unittest import mock - from unittest.mock import AsyncMock # pragma: NO COVER -except ImportError: # pragma: NO COVER - import mock - -import grpc -from grpc.experimental import aio -from collections.abc import Iterable, AsyncIterable -from google.protobuf import json_format -import json -import math -import pytest -from google.api_core import api_core_version -from proto.marshal.rules.dates import DurationRule, TimestampRule -from proto.marshal.rules import wrappers -from requests import Response -from requests import Request, PreparedRequest -from requests.sessions import Session -from google.protobuf import json_format - -try: - from google.auth.aio import credentials as ga_credentials_async - - HAS_GOOGLE_AUTH_AIO = True -except ImportError: # pragma: NO COVER - HAS_GOOGLE_AUTH_AIO = False - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.api_core import path_template -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( - BigtableInstanceAdminAsyncClient, -) -from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( - BigtableInstanceAdminClient, -) -from 
google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import pagers -from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import transports -from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin -from google.cloud.bigtable_admin_v2.types import common -from google.cloud.bigtable_admin_v2.types import instance -from google.cloud.bigtable_admin_v2.types import instance as gba_instance -from google.iam.v1 import iam_policy_pb2 # type: ignore -from google.iam.v1 import options_pb2 # type: ignore -from google.iam.v1 import policy_pb2 # type: ignore -from google.longrunning import operations_pb2 # type: ignore -from google.oauth2 import service_account -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.type import expr_pb2 # type: ignore -import google.auth - - -CRED_INFO_JSON = { - "credential_source": "/path/to/file", - "credential_type": "service account credentials", - "principal": "service-account@example.com", -} -CRED_INFO_STRING = json.dumps(CRED_INFO_JSON) - - -async def mock_async_gen(data, chunk_size=1): - for i in range(0, len(data)): # pragma: NO COVER - chunk = data[i : i + chunk_size] - yield chunk.encode("utf-8") - - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# TODO: use async auth anon credentials by default once the minimum version of google-auth is upgraded. -# See related issue: https://github.com/googleapis/gapic-generator-python/issues/2107. -def async_anonymous_credentials(): - if HAS_GOOGLE_AUTH_AIO: - return ga_credentials_async.AnonymousCredentials() - return ga_credentials.AnonymousCredentials() - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. -def modify_default_endpoint(client): - return ( - "foo.googleapis.com" - if ("localhost" in client.DEFAULT_ENDPOINT) - else client.DEFAULT_ENDPOINT - ) - - -# If default endpoint template is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint template so the client can produce a different -# mtls endpoint for endpoint testing purposes. 
-def modify_default_endpoint_template(client): - return ( - "test.{UNIVERSE_DOMAIN}" - if ("localhost" in client._DEFAULT_ENDPOINT_TEMPLATE) - else client._DEFAULT_ENDPOINT_TEMPLATE - ) - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert BigtableInstanceAdminClient._get_default_mtls_endpoint(None) is None - assert ( - BigtableInstanceAdminClient._get_default_mtls_endpoint(api_endpoint) - == api_mtls_endpoint - ) - assert ( - BigtableInstanceAdminClient._get_default_mtls_endpoint(api_mtls_endpoint) - == api_mtls_endpoint - ) - assert ( - BigtableInstanceAdminClient._get_default_mtls_endpoint(sandbox_endpoint) - == sandbox_mtls_endpoint - ) - assert ( - BigtableInstanceAdminClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) - == sandbox_mtls_endpoint - ) - assert ( - BigtableInstanceAdminClient._get_default_mtls_endpoint(non_googleapi) - == non_googleapi - ) - - -def test__read_environment_variables(): - assert BigtableInstanceAdminClient._read_environment_variables() == ( - False, - "auto", - None, - ) - - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): - assert BigtableInstanceAdminClient._read_environment_variables() == ( - True, - "auto", - None, - ) - - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): - assert BigtableInstanceAdminClient._read_environment_variables() == ( - False, - "auto", - None, - ) - - with mock.patch.dict( - os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} - ): - with pytest.raises(ValueError) as excinfo: - BigtableInstanceAdminClient._read_environment_variables() - assert ( - str(excinfo.value) - == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) - - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - assert BigtableInstanceAdminClient._read_environment_variables() == ( - False, - "never", - None, - ) - - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - assert BigtableInstanceAdminClient._read_environment_variables() == ( - False, - "always", - None, - ) - - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}): - assert BigtableInstanceAdminClient._read_environment_variables() == ( - False, - "auto", - None, - ) - - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError) as excinfo: - BigtableInstanceAdminClient._read_environment_variables() - assert ( - str(excinfo.value) - == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" - ) - - with mock.patch.dict(os.environ, {"GOOGLE_CLOUD_UNIVERSE_DOMAIN": "foo.com"}): - assert BigtableInstanceAdminClient._read_environment_variables() == ( - False, - "auto", - "foo.com", - ) - - -def test__get_client_cert_source(): - mock_provided_cert_source = mock.Mock() - mock_default_cert_source = mock.Mock() - - assert BigtableInstanceAdminClient._get_client_cert_source(None, False) is None - assert ( - BigtableInstanceAdminClient._get_client_cert_source( - mock_provided_cert_source, False - ) - is None - ) - assert ( - BigtableInstanceAdminClient._get_client_cert_source( - mock_provided_cert_source, True - ) - == mock_provided_cert_source - ) - - with mock.patch( - 
"google.auth.transport.mtls.has_default_client_cert_source", return_value=True - ): - with mock.patch( - "google.auth.transport.mtls.default_client_cert_source", - return_value=mock_default_cert_source, - ): - assert ( - BigtableInstanceAdminClient._get_client_cert_source(None, True) - is mock_default_cert_source - ) - assert ( - BigtableInstanceAdminClient._get_client_cert_source( - mock_provided_cert_source, "true" - ) - is mock_provided_cert_source - ) - - -@mock.patch.object( - BigtableInstanceAdminClient, - "_DEFAULT_ENDPOINT_TEMPLATE", - modify_default_endpoint_template(BigtableInstanceAdminClient), -) -@mock.patch.object( - BigtableInstanceAdminAsyncClient, - "_DEFAULT_ENDPOINT_TEMPLATE", - modify_default_endpoint_template(BigtableInstanceAdminAsyncClient), -) -def test__get_api_endpoint(): - api_override = "foo.com" - mock_client_cert_source = mock.Mock() - default_universe = BigtableInstanceAdminClient._DEFAULT_UNIVERSE - default_endpoint = BigtableInstanceAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( - UNIVERSE_DOMAIN=default_universe - ) - mock_universe = "bar.com" - mock_endpoint = BigtableInstanceAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( - UNIVERSE_DOMAIN=mock_universe - ) - - assert ( - BigtableInstanceAdminClient._get_api_endpoint( - api_override, mock_client_cert_source, default_universe, "always" - ) - == api_override - ) - assert ( - BigtableInstanceAdminClient._get_api_endpoint( - None, mock_client_cert_source, default_universe, "auto" - ) - == BigtableInstanceAdminClient.DEFAULT_MTLS_ENDPOINT - ) - assert ( - BigtableInstanceAdminClient._get_api_endpoint( - None, None, default_universe, "auto" - ) - == default_endpoint - ) - assert ( - BigtableInstanceAdminClient._get_api_endpoint( - None, None, default_universe, "always" - ) - == BigtableInstanceAdminClient.DEFAULT_MTLS_ENDPOINT - ) - assert ( - BigtableInstanceAdminClient._get_api_endpoint( - None, mock_client_cert_source, default_universe, "always" - ) - == BigtableInstanceAdminClient.DEFAULT_MTLS_ENDPOINT - ) - assert ( - BigtableInstanceAdminClient._get_api_endpoint( - None, None, mock_universe, "never" - ) - == mock_endpoint - ) - assert ( - BigtableInstanceAdminClient._get_api_endpoint( - None, None, default_universe, "never" - ) - == default_endpoint - ) - - with pytest.raises(MutualTLSChannelError) as excinfo: - BigtableInstanceAdminClient._get_api_endpoint( - None, mock_client_cert_source, mock_universe, "auto" - ) - assert ( - str(excinfo.value) - == "mTLS is not supported in any universe other than googleapis.com." - ) - - -def test__get_universe_domain(): - client_universe_domain = "foo.com" - universe_domain_env = "bar.com" - - assert ( - BigtableInstanceAdminClient._get_universe_domain( - client_universe_domain, universe_domain_env - ) - == client_universe_domain - ) - assert ( - BigtableInstanceAdminClient._get_universe_domain(None, universe_domain_env) - == universe_domain_env - ) - assert ( - BigtableInstanceAdminClient._get_universe_domain(None, None) - == BigtableInstanceAdminClient._DEFAULT_UNIVERSE - ) - - with pytest.raises(ValueError) as excinfo: - BigtableInstanceAdminClient._get_universe_domain("", None) - assert str(excinfo.value) == "Universe Domain cannot be an empty string." 
- - -@pytest.mark.parametrize( - "error_code,cred_info_json,show_cred_info", - [ - (401, CRED_INFO_JSON, True), - (403, CRED_INFO_JSON, True), - (404, CRED_INFO_JSON, True), - (500, CRED_INFO_JSON, False), - (401, None, False), - (403, None, False), - (404, None, False), - (500, None, False), - ], -) -def test__add_cred_info_for_auth_errors(error_code, cred_info_json, show_cred_info): - cred = mock.Mock(["get_cred_info"]) - cred.get_cred_info = mock.Mock(return_value=cred_info_json) - client = BigtableInstanceAdminClient(credentials=cred) - client._transport._credentials = cred - - error = core_exceptions.GoogleAPICallError("message", details=["foo"]) - error.code = error_code - - client._add_cred_info_for_auth_errors(error) - if show_cred_info: - assert error.details == ["foo", CRED_INFO_STRING] - else: - assert error.details == ["foo"] - - -@pytest.mark.parametrize("error_code", [401, 403, 404, 500]) -def test__add_cred_info_for_auth_errors_no_get_cred_info(error_code): - cred = mock.Mock([]) - assert not hasattr(cred, "get_cred_info") - client = BigtableInstanceAdminClient(credentials=cred) - client._transport._credentials = cred - - error = core_exceptions.GoogleAPICallError("message", details=[]) - error.code = error_code - - client._add_cred_info_for_auth_errors(error) - assert error.details == [] - - -@pytest.mark.parametrize( - "client_class,transport_name", - [ - (BigtableInstanceAdminClient, "grpc"), - (BigtableInstanceAdminAsyncClient, "grpc_asyncio"), - (BigtableInstanceAdminClient, "rest"), - ], -) -def test_bigtable_instance_admin_client_from_service_account_info( - client_class, transport_name -): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object( - service_account.Credentials, "from_service_account_info" - ) as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info, transport=transport_name) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == ( - "bigtableadmin.googleapis.com:443" - if transport_name in ["grpc", "grpc_asyncio"] - else "https://bigtableadmin.googleapis.com" - ) - - -@pytest.mark.parametrize( - "transport_class,transport_name", - [ - (transports.BigtableInstanceAdminGrpcTransport, "grpc"), - (transports.BigtableInstanceAdminGrpcAsyncIOTransport, "grpc_asyncio"), - (transports.BigtableInstanceAdminRestTransport, "rest"), - ], -) -def test_bigtable_instance_admin_client_service_account_always_use_jwt( - transport_class, transport_name -): - with mock.patch.object( - service_account.Credentials, "with_always_use_jwt_access", create=True - ) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - with mock.patch.object( - service_account.Credentials, "with_always_use_jwt_access", create=True - ) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=False) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize( - "client_class,transport_name", - [ - (BigtableInstanceAdminClient, "grpc"), - (BigtableInstanceAdminAsyncClient, "grpc_asyncio"), - (BigtableInstanceAdminClient, "rest"), - ], -) -def test_bigtable_instance_admin_client_from_service_account_file( - client_class, transport_name -): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object( - 
service_account.Credentials, "from_service_account_file" - ) as factory: - factory.return_value = creds - client = client_class.from_service_account_file( - "dummy/file/path.json", transport=transport_name - ) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json( - "dummy/file/path.json", transport=transport_name - ) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == ( - "bigtableadmin.googleapis.com:443" - if transport_name in ["grpc", "grpc_asyncio"] - else "https://bigtableadmin.googleapis.com" - ) - - -def test_bigtable_instance_admin_client_get_transport_class(): - transport = BigtableInstanceAdminClient.get_transport_class() - available_transports = [ - transports.BigtableInstanceAdminGrpcTransport, - transports.BigtableInstanceAdminRestTransport, - ] - assert transport in available_transports - - transport = BigtableInstanceAdminClient.get_transport_class("grpc") - assert transport == transports.BigtableInstanceAdminGrpcTransport - - -@pytest.mark.parametrize( - "client_class,transport_class,transport_name", - [ - ( - BigtableInstanceAdminClient, - transports.BigtableInstanceAdminGrpcTransport, - "grpc", - ), - ( - BigtableInstanceAdminAsyncClient, - transports.BigtableInstanceAdminGrpcAsyncIOTransport, - "grpc_asyncio", - ), - ( - BigtableInstanceAdminClient, - transports.BigtableInstanceAdminRestTransport, - "rest", - ), - ], -) -@mock.patch.object( - BigtableInstanceAdminClient, - "_DEFAULT_ENDPOINT_TEMPLATE", - modify_default_endpoint_template(BigtableInstanceAdminClient), -) -@mock.patch.object( - BigtableInstanceAdminAsyncClient, - "_DEFAULT_ENDPOINT_TEMPLATE", - modify_default_endpoint_template(BigtableInstanceAdminAsyncClient), -) -def test_bigtable_instance_admin_client_client_options( - client_class, transport_class, transport_name -): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(BigtableInstanceAdminClient, "get_transport_class") as gtc: - transport = transport_class(credentials=ga_credentials.AnonymousCredentials()) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(BigtableInstanceAdminClient, "get_transport_class") as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, "__init__") as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, "__init__") as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client._DEFAULT_ENDPOINT_TEMPLATE.format( - UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE - ), - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, "__init__") as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError) as excinfo: - client = client_class(transport=transport_name) - assert ( - str(excinfo.value) - == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" - ) - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. - with mock.patch.dict( - os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} - ): - with pytest.raises(ValueError) as excinfo: - client = client_class(transport=transport_name) - assert ( - str(excinfo.value) - == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, "__init__") as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client._DEFAULT_ENDPOINT_TEMPLATE.format( - UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE - ), - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - # Check the case api_endpoint is provided - options = client_options.ClientOptions( - api_audience="https://language.googleapis.com" - ) - with mock.patch.object(transport_class, "__init__") as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client._DEFAULT_ENDPOINT_TEMPLATE.format( - UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE - ), - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience="https://language.googleapis.com", - ) - - -@pytest.mark.parametrize( - "client_class,transport_class,transport_name,use_client_cert_env", - [ - ( - BigtableInstanceAdminClient, - 
transports.BigtableInstanceAdminGrpcTransport, - "grpc", - "true", - ), - ( - BigtableInstanceAdminAsyncClient, - transports.BigtableInstanceAdminGrpcAsyncIOTransport, - "grpc_asyncio", - "true", - ), - ( - BigtableInstanceAdminClient, - transports.BigtableInstanceAdminGrpcTransport, - "grpc", - "false", - ), - ( - BigtableInstanceAdminAsyncClient, - transports.BigtableInstanceAdminGrpcAsyncIOTransport, - "grpc_asyncio", - "false", - ), - ( - BigtableInstanceAdminClient, - transports.BigtableInstanceAdminRestTransport, - "rest", - "true", - ), - ( - BigtableInstanceAdminClient, - transports.BigtableInstanceAdminRestTransport, - "rest", - "false", - ), - ], -) -@mock.patch.object( - BigtableInstanceAdminClient, - "_DEFAULT_ENDPOINT_TEMPLATE", - modify_default_endpoint_template(BigtableInstanceAdminClient), -) -@mock.patch.object( - BigtableInstanceAdminAsyncClient, - "_DEFAULT_ENDPOINT_TEMPLATE", - modify_default_endpoint_template(BigtableInstanceAdminAsyncClient), -) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_bigtable_instance_admin_client_mtls_env_auto( - client_class, transport_class, transport_name, use_client_cert_env -): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict( - os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} - ): - options = client_options.ClientOptions( - client_cert_source=client_cert_source_callback - ) - with mock.patch.object(transport_class, "__init__") as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format( - UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE - ) - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict( - os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} - ): - with mock.patch.object(transport_class, "__init__") as patched: - with mock.patch( - "google.auth.transport.mtls.has_default_client_cert_source", - return_value=True, - ): - with mock.patch( - "google.auth.transport.mtls.default_client_cert_source", - return_value=client_cert_source_callback, - ): - if use_client_cert_env == "false": - expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format( - UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE - ) - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # Check the case client_cert_source and ADC client cert are not provided. - with mock.patch.dict( - os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} - ): - with mock.patch.object(transport_class, "__init__") as patched: - with mock.patch( - "google.auth.transport.mtls.has_default_client_cert_source", - return_value=False, - ): - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client._DEFAULT_ENDPOINT_TEMPLATE.format( - UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE - ), - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - -@pytest.mark.parametrize( - "client_class", [BigtableInstanceAdminClient, BigtableInstanceAdminAsyncClient] -) -@mock.patch.object( - BigtableInstanceAdminClient, - "DEFAULT_ENDPOINT", - modify_default_endpoint(BigtableInstanceAdminClient), -) -@mock.patch.object( - BigtableInstanceAdminAsyncClient, - "DEFAULT_ENDPOINT", - modify_default_endpoint(BigtableInstanceAdminAsyncClient), -) -def test_bigtable_instance_admin_client_get_mtls_endpoint_and_cert_source(client_class): - mock_client_cert_source = mock.Mock() - - # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): - mock_api_endpoint = "foo" - options = client_options.ClientOptions( - client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint - ) - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( - options - ) - assert api_endpoint == mock_api_endpoint - assert cert_source == mock_client_cert_source - - # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): - mock_client_cert_source = mock.Mock() - mock_api_endpoint = "foo" - options = client_options.ClientOptions( - client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint - ) - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( - options - ) - assert api_endpoint == mock_api_endpoint - assert cert_source is None - - # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() - assert api_endpoint == client_class.DEFAULT_ENDPOINT - assert cert_source is None - - # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() - assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT - assert cert_source is None - - # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): - with mock.patch( - "google.auth.transport.mtls.has_default_client_cert_source", - return_value=False, - ): - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() - assert api_endpoint == client_class.DEFAULT_ENDPOINT - assert cert_source is None - - # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): - with mock.patch( - "google.auth.transport.mtls.has_default_client_cert_source", - return_value=True, - ): - with mock.patch( - "google.auth.transport.mtls.default_client_cert_source", - return_value=mock_client_cert_source, - ): - ( - api_endpoint, - cert_source, - ) = client_class.get_mtls_endpoint_and_cert_source() - assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT - assert cert_source == mock_client_cert_source - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError) as excinfo: - client_class.get_mtls_endpoint_and_cert_source() - - assert ( - str(excinfo.value) - == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" - ) - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. - with mock.patch.dict( - os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} - ): - with pytest.raises(ValueError) as excinfo: - client_class.get_mtls_endpoint_and_cert_source() - - assert ( - str(excinfo.value) - == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) - - -@pytest.mark.parametrize( - "client_class", [BigtableInstanceAdminClient, BigtableInstanceAdminAsyncClient] -) -@mock.patch.object( - BigtableInstanceAdminClient, - "_DEFAULT_ENDPOINT_TEMPLATE", - modify_default_endpoint_template(BigtableInstanceAdminClient), -) -@mock.patch.object( - BigtableInstanceAdminAsyncClient, - "_DEFAULT_ENDPOINT_TEMPLATE", - modify_default_endpoint_template(BigtableInstanceAdminAsyncClient), -) -def test_bigtable_instance_admin_client_client_api_endpoint(client_class): - mock_client_cert_source = client_cert_source_callback - api_override = "foo.com" - default_universe = BigtableInstanceAdminClient._DEFAULT_UNIVERSE - default_endpoint = BigtableInstanceAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( - UNIVERSE_DOMAIN=default_universe - ) - mock_universe = "bar.com" - mock_endpoint = BigtableInstanceAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( - UNIVERSE_DOMAIN=mock_universe - ) - - # If ClientOptions.api_endpoint is set and GOOGLE_API_USE_CLIENT_CERTIFICATE="true", - # use ClientOptions.api_endpoint as the api endpoint regardless. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): - with mock.patch( - "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" - ): - options = client_options.ClientOptions( - client_cert_source=mock_client_cert_source, api_endpoint=api_override - ) - client = client_class( - client_options=options, - credentials=ga_credentials.AnonymousCredentials(), - ) - assert client.api_endpoint == api_override - - # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="never", - # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - client = client_class(credentials=ga_credentials.AnonymousCredentials()) - assert client.api_endpoint == default_endpoint - - # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="always", - # use the DEFAULT_MTLS_ENDPOINT as the api endpoint. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - client = client_class(credentials=ga_credentials.AnonymousCredentials()) - assert client.api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT - - # If ClientOptions.api_endpoint is not set, GOOGLE_API_USE_MTLS_ENDPOINT="auto" (default), - # GOOGLE_API_USE_CLIENT_CERTIFICATE="false" (default), default cert source doesn't exist, - # and ClientOptions.universe_domain="bar.com", - # use the _DEFAULT_ENDPOINT_TEMPLATE populated with universe domain as the api endpoint. - options = client_options.ClientOptions() - universe_exists = hasattr(options, "universe_domain") - if universe_exists: - options = client_options.ClientOptions(universe_domain=mock_universe) - client = client_class( - client_options=options, credentials=ga_credentials.AnonymousCredentials() - ) - else: - client = client_class( - client_options=options, credentials=ga_credentials.AnonymousCredentials() - ) - assert client.api_endpoint == ( - mock_endpoint if universe_exists else default_endpoint - ) - assert client.universe_domain == ( - mock_universe if universe_exists else default_universe - ) - - # If ClientOptions does not have a universe domain attribute and GOOGLE_API_USE_MTLS_ENDPOINT="never", - # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. - options = client_options.ClientOptions() - if hasattr(options, "universe_domain"): - delattr(options, "universe_domain") - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - client = client_class( - client_options=options, credentials=ga_credentials.AnonymousCredentials() - ) - assert client.api_endpoint == default_endpoint - - -@pytest.mark.parametrize( - "client_class,transport_class,transport_name", - [ - ( - BigtableInstanceAdminClient, - transports.BigtableInstanceAdminGrpcTransport, - "grpc", - ), - ( - BigtableInstanceAdminAsyncClient, - transports.BigtableInstanceAdminGrpcAsyncIOTransport, - "grpc_asyncio", - ), - ( - BigtableInstanceAdminClient, - transports.BigtableInstanceAdminRestTransport, - "rest", - ), - ], -) -def test_bigtable_instance_admin_client_client_options_scopes( - client_class, transport_class, transport_name -): - # Check the case scopes are provided. 
- options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, "__init__") as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client._DEFAULT_ENDPOINT_TEMPLATE.format( - UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE - ), - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - -@pytest.mark.parametrize( - "client_class,transport_class,transport_name,grpc_helpers", - [ - ( - BigtableInstanceAdminClient, - transports.BigtableInstanceAdminGrpcTransport, - "grpc", - grpc_helpers, - ), - ( - BigtableInstanceAdminAsyncClient, - transports.BigtableInstanceAdminGrpcAsyncIOTransport, - "grpc_asyncio", - grpc_helpers_async, - ), - ( - BigtableInstanceAdminClient, - transports.BigtableInstanceAdminRestTransport, - "rest", - None, - ), - ], -) -def test_bigtable_instance_admin_client_client_options_credentials_file( - client_class, transport_class, transport_name, grpc_helpers -): - # Check the case credentials file is provided. - options = client_options.ClientOptions(credentials_file="credentials.json") - - with mock.patch.object(transport_class, "__init__") as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client._DEFAULT_ENDPOINT_TEMPLATE.format( - UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE - ), - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - -def test_bigtable_instance_admin_client_client_options_from_dict(): - with mock.patch( - "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.BigtableInstanceAdminGrpcTransport.__init__" - ) as grpc_transport: - grpc_transport.return_value = None - client = BigtableInstanceAdminClient( - client_options={"api_endpoint": "squid.clam.whelk"} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - -@pytest.mark.parametrize( - "client_class,transport_class,transport_name,grpc_helpers", - [ - ( - BigtableInstanceAdminClient, - transports.BigtableInstanceAdminGrpcTransport, - "grpc", - grpc_helpers, - ), - ( - BigtableInstanceAdminAsyncClient, - transports.BigtableInstanceAdminGrpcAsyncIOTransport, - "grpc_asyncio", - grpc_helpers_async, - ), - ], -) -def test_bigtable_instance_admin_client_create_channel_credentials_file( - client_class, transport_class, transport_name, grpc_helpers -): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions(credentials_file="credentials.json") - - with mock.patch.object(transport_class, "__init__") as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client._DEFAULT_ENDPOINT_TEMPLATE.format( - UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE - ), - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # test that the credentials from file are saved and used as the credentials. - with mock.patch.object( - google.auth, "load_credentials_from_file", autospec=True - ) as load_creds, mock.patch.object( - google.auth, "default", autospec=True - ) as adc, mock.patch.object( - grpc_helpers, "create_channel" - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - file_creds = ga_credentials.AnonymousCredentials() - load_creds.return_value = (file_creds, None) - adc.return_value = (creds, None) - client = client_class(client_options=options, transport=transport_name) - create_channel.assert_called_with( - "bigtableadmin.googleapis.com:443", - credentials=file_creds, - credentials_file=None, - quota_project_id=None, - default_scopes=( - "https://www.googleapis.com/auth/bigtable.admin", - "https://www.googleapis.com/auth/bigtable.admin.cluster", - "https://www.googleapis.com/auth/bigtable.admin.instance", - "https://www.googleapis.com/auth/cloud-bigtable.admin", - "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - ), - scopes=None, - default_host="bigtableadmin.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.CreateInstanceRequest, - dict, - ], -) -def test_create_instance(request_type, transport: str = "grpc"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.create_instance), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/spam") - response = client.create_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.CreateInstanceRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_create_instance_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. 
- client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_instance_admin.CreateInstanceRequest( - parent="parent_value", - instance_id="instance_id_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.create_instance), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.create_instance(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.CreateInstanceRequest( - parent="parent_value", - instance_id="instance_id_value", - ) - - -def test_create_instance_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.create_instance in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.create_instance] = mock_rpc - request = {} - client.create_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods call wrapper_fn to build a cached - # client._transport.operations_client instance on first rpc call. - # Subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.create_instance(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_create_instance_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.create_instance - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.create_instance - ] = mock_rpc - - request = {} - await client.create_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods call wrapper_fn to build a cached - # client._transport.operations_client instance on first rpc call. 
- # Subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - await client.create_instance(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_create_instance_async( - transport: str = "grpc_asyncio", - request_type=bigtable_instance_admin.CreateInstanceRequest, -): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.create_instance), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - response = await client.create_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.CreateInstanceRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_instance_async_from_dict(): - await test_create_instance_async(request_type=dict) - - -def test_create_instance_field_headers(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.CreateInstanceRequest() - - request.parent = "parent_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.create_instance), "__call__") as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.create_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_create_instance_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.CreateInstanceRequest() - - request.parent = "parent_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.create_instance), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/op") - ) - await client.create_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] - - -def test_create_instance_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.create_instance), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/op") - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_instance( - parent="parent_value", - instance_id="instance_id_value", - instance=gba_instance.Instance(name="name_value"), - clusters={"key_value": gba_instance.Cluster(name="name_value")}, - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - arg = args[0].instance_id - mock_val = "instance_id_value" - assert arg == mock_val - arg = args[0].instance - mock_val = gba_instance.Instance(name="name_value") - assert arg == mock_val - arg = args[0].clusters - mock_val = {"key_value": gba_instance.Cluster(name="name_value")} - assert arg == mock_val - - -def test_create_instance_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_instance( - bigtable_instance_admin.CreateInstanceRequest(), - parent="parent_value", - instance_id="instance_id_value", - instance=gba_instance.Instance(name="name_value"), - clusters={"key_value": gba_instance.Cluster(name="name_value")}, - ) - - -@pytest.mark.asyncio -async def test_create_instance_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.create_instance), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/op") - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_instance( - parent="parent_value", - instance_id="instance_id_value", - instance=gba_instance.Instance(name="name_value"), - clusters={"key_value": gba_instance.Cluster(name="name_value")}, - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - arg = args[0].instance_id - mock_val = "instance_id_value" - assert arg == mock_val - arg = args[0].instance - mock_val = gba_instance.Instance(name="name_value") - assert arg == mock_val - arg = args[0].clusters - mock_val = {"key_value": gba_instance.Cluster(name="name_value")} - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_instance_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_instance( - bigtable_instance_admin.CreateInstanceRequest(), - parent="parent_value", - instance_id="instance_id_value", - instance=gba_instance.Instance(name="name_value"), - clusters={"key_value": gba_instance.Cluster(name="name_value")}, - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.GetInstanceRequest, - dict, - ], -) -def test_get_instance(request_type, transport: str = "grpc"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_instance), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = instance.Instance( - name="name_value", - display_name="display_name_value", - state=instance.Instance.State.READY, - type_=instance.Instance.Type.PRODUCTION, - satisfies_pzs=True, - satisfies_pzi=True, - ) - response = client.get_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.GetInstanceRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, instance.Instance) - assert response.name == "name_value" - assert response.display_name == "display_name_value" - assert response.state == instance.Instance.State.READY - assert response.type_ == instance.Instance.Type.PRODUCTION - assert response.satisfies_pzs is True - assert response.satisfies_pzi is True - - -def test_get_instance_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_instance_admin.GetInstanceRequest( - name="name_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_instance), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. 
- ) - client.get_instance(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.GetInstanceRequest( - name="name_value", - ) - - -def test_get_instance_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.get_instance in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.get_instance] = mock_rpc - request = {} - client.get_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.get_instance(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_get_instance_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.get_instance - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.get_instance - ] = mock_rpc - - request = {} - await client.get_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - await client.get_instance(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_get_instance_async( - transport: str = "grpc_asyncio", - request_type=bigtable_instance_admin.GetInstanceRequest, -): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_instance), "__call__") as call: - # Designate an appropriate return value for the call. 
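Stripped to its core, the use_cached_wrapped_rpc tests verify that gapic_v1.method.wrap_method runs only inside _prep_wrapped_messages at client construction and that every later call reuses the cached wrapper. A compact sketch of the same idea follows; the test name is invented, the same installed-package assumptions apply, and it is a rough illustration rather than part of the generated suite.

from unittest import mock

from google.auth import credentials as ga_credentials
from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
    BigtableInstanceAdminClient,
)


def test_wrapped_rpcs_cached_sketch():
    # Illustrative only: wrapped RPCs are built once at construction and looked
    # up per call from _wrapped_methods, so no further wrapping happens later.
    with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
        client = BigtableInstanceAdminClient(
            credentials=ga_credentials.AnonymousCredentials(),
            transport="grpc",
        )
        wraps_at_construction = wrapper_fn.call_count
        assert wraps_at_construction > 0

        # The transport keys the cached wrapper by the raw stub method.
        assert client._transport.get_instance in client._transport._wrapped_methods

        # Swap in a plain mock and call twice: no additional wrapping occurs.
        stub = mock.Mock()
        client._transport._wrapped_methods[client._transport.get_instance] = stub
        client.get_instance({})
        client.get_instance({})
        assert wrapper_fn.call_count == wraps_at_construction
        assert stub.call_count == 2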
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - instance.Instance( - name="name_value", - display_name="display_name_value", - state=instance.Instance.State.READY, - type_=instance.Instance.Type.PRODUCTION, - satisfies_pzs=True, - satisfies_pzi=True, - ) - ) - response = await client.get_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.GetInstanceRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, instance.Instance) - assert response.name == "name_value" - assert response.display_name == "display_name_value" - assert response.state == instance.Instance.State.READY - assert response.type_ == instance.Instance.Type.PRODUCTION - assert response.satisfies_pzs is True - assert response.satisfies_pzi is True - - -@pytest.mark.asyncio -async def test_get_instance_async_from_dict(): - await test_get_instance_async(request_type=dict) - - -def test_get_instance_field_headers(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.GetInstanceRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_instance), "__call__") as call: - call.return_value = instance.Instance() - client.get_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_get_instance_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.GetInstanceRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_instance), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.Instance()) - await client.get_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -def test_get_instance_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_instance), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = instance.Instance() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- client.get_instance( - name="name_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - - -def test_get_instance_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_instance( - bigtable_instance_admin.GetInstanceRequest(), - name="name_value", - ) - - -@pytest.mark.asyncio -async def test_get_instance_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_instance), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = instance.Instance() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.Instance()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_instance( - name="name_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_instance_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_instance( - bigtable_instance_admin.GetInstanceRequest(), - name="name_value", - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.ListInstancesRequest, - dict, - ], -) -def test_list_instances(request_type, transport: str = "grpc"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_instances), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_instance_admin.ListInstancesResponse( - failed_locations=["failed_locations_value"], - next_page_token="next_page_token_value", - ) - response = client.list_instances(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.ListInstancesRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. 
- assert response.raw_page is response - assert isinstance(response, bigtable_instance_admin.ListInstancesResponse) - assert response.failed_locations == ["failed_locations_value"] - assert response.next_page_token == "next_page_token_value" - - -def test_list_instances_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_instance_admin.ListInstancesRequest( - parent="parent_value", - page_token="page_token_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_instances), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.list_instances(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListInstancesRequest( - parent="parent_value", - page_token="page_token_value", - ) - - -def test_list_instances_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.list_instances in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.list_instances] = mock_rpc - request = {} - client.list_instances(request) - - # Establish that the underlying gRPC stub method was called. 
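The non_empty_request tests can compare args[0] against a freshly keyword-built request because proto-plus request types construct equally well from keyword arguments or a mapping and define structural equality. A tiny sketch of that property, illustrative only and with the same installed-package assumptions:

from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin


def test_request_coercion_sketch():
    # Illustrative only: keyword and mapping construction yield equal messages,
    # which is what the args[0] == Request(...) assertions rely on.
    from_kwargs = bigtable_instance_admin.ListInstancesRequest(
        parent="parent_value",
        page_token="page_token_value",
    )
    from_mapping = bigtable_instance_admin.ListInstancesRequest(
        {"parent": "parent_value", "page_token": "page_token_value"}
    )
    assert from_kwargs == from_mapping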
- assert mock_rpc.call_count == 1 - - client.list_instances(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_list_instances_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.list_instances - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.list_instances - ] = mock_rpc - - request = {} - await client.list_instances(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - await client.list_instances(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_list_instances_async( - transport: str = "grpc_asyncio", - request_type=bigtable_instance_admin.ListInstancesRequest, -): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_instances), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_instance_admin.ListInstancesResponse( - failed_locations=["failed_locations_value"], - next_page_token="next_page_token_value", - ) - ) - response = await client.list_instances(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.ListInstancesRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, bigtable_instance_admin.ListInstancesResponse) - assert response.failed_locations == ["failed_locations_value"] - assert response.next_page_token == "next_page_token_value" - - -@pytest.mark.asyncio -async def test_list_instances_async_from_dict(): - await test_list_instances_async(request_type=dict) - - -def test_list_instances_field_headers(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.ListInstancesRequest() - - request.parent = "parent_value" - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(type(client.transport.list_instances), "__call__") as call: - call.return_value = bigtable_instance_admin.ListInstancesResponse() - client.list_instances(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_list_instances_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.ListInstancesRequest() - - request.parent = "parent_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_instances), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_instance_admin.ListInstancesResponse() - ) - await client.list_instances(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] - - -def test_list_instances_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_instances), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_instance_admin.ListInstancesResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_instances( - parent="parent_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - - -def test_list_instances_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_instances( - bigtable_instance_admin.ListInstancesRequest(), - parent="parent_value", - ) - - -@pytest.mark.asyncio -async def test_list_instances_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_instances), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_instance_admin.ListInstancesResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_instance_admin.ListInstancesResponse() - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
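The flattened tests exercise two rules: keyword arguments such as parent= are folded into the request proto that reaches the stub, and supplying both a request object and flattened fields raises ValueError before any RPC is attempted. A standalone sketch of both rules; the test name is invented, pytest is assumed, and it is illustrative rather than part of the removed files.

from unittest import mock

import pytest
from google.auth import credentials as ga_credentials
from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
    BigtableInstanceAdminClient,
)
from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin


def test_flattened_vs_request_sketch():
    # Illustrative only: flattened kwargs are copied into the outgoing request,
    # but mixing them with an explicit request object is rejected.
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(type(client.transport.list_instances), "__call__") as call:
        call.return_value = bigtable_instance_admin.ListInstancesResponse()
        client.list_instances(parent="parent_value")
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"

    with pytest.raises(ValueError):
        client.list_instances(
            bigtable_instance_admin.ListInstancesRequest(),
            parent="parent_value",
        )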
- response = await client.list_instances( - parent="parent_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_instances_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_instances( - bigtable_instance_admin.ListInstancesRequest(), - parent="parent_value", - ) - - -@pytest.mark.parametrize( - "request_type", - [ - instance.Instance, - dict, - ], -) -def test_update_instance(request_type, transport: str = "grpc"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_instance), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = instance.Instance( - name="name_value", - display_name="display_name_value", - state=instance.Instance.State.READY, - type_=instance.Instance.Type.PRODUCTION, - satisfies_pzs=True, - satisfies_pzi=True, - ) - response = client.update_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = instance.Instance() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, instance.Instance) - assert response.name == "name_value" - assert response.display_name == "display_name_value" - assert response.state == instance.Instance.State.READY - assert response.type_ == instance.Instance.Type.PRODUCTION - assert response.satisfies_pzs is True - assert response.satisfies_pzi is True - - -def test_update_instance_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = instance.Instance( - name="name_value", - display_name="display_name_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_instance), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. 
- ) - client.update_instance(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == instance.Instance( - name="name_value", - display_name="display_name_value", - ) - - -def test_update_instance_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.update_instance in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.update_instance] = mock_rpc - request = {} - client.update_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.update_instance(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_update_instance_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.update_instance - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.update_instance - ] = mock_rpc - - request = {} - await client.update_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - await client.update_instance(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_update_instance_async( - transport: str = "grpc_asyncio", request_type=instance.Instance -): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_instance), "__call__") as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - instance.Instance( - name="name_value", - display_name="display_name_value", - state=instance.Instance.State.READY, - type_=instance.Instance.Type.PRODUCTION, - satisfies_pzs=True, - satisfies_pzi=True, - ) - ) - response = await client.update_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = instance.Instance() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, instance.Instance) - assert response.name == "name_value" - assert response.display_name == "display_name_value" - assert response.state == instance.Instance.State.READY - assert response.type_ == instance.Instance.Type.PRODUCTION - assert response.satisfies_pzs is True - assert response.satisfies_pzi is True - - -@pytest.mark.asyncio -async def test_update_instance_async_from_dict(): - await test_update_instance_async(request_type=dict) - - -def test_update_instance_field_headers(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = instance.Instance() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_instance), "__call__") as call: - call.return_value = instance.Instance() - client.update_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_update_instance_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = instance.Instance() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_instance), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.Instance()) - await client.update_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.PartialUpdateInstanceRequest, - dict, - ], -) -def test_partial_update_instance(request_type, transport: str = "grpc"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
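On the async side, the mocked stub returns a grpc_helpers_async.FakeUnaryUnaryCall so that awaiting the patched call resolves to the designated message without any channel activity. A rough sketch of that mechanic follows; the test name is invented, pytest-asyncio is assumed, and plain AnonymousCredentials stand in for the async_anonymous_credentials helper that the removed test module defines locally.

from unittest import mock

import pytest
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials
from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
    BigtableInstanceAdminAsyncClient,
)
from google.cloud.bigtable_admin_v2.types import instance


@pytest.mark.asyncio
async def test_async_fake_call_sketch():
    # Illustrative only: the awaited stub call resolves to the message wrapped
    # in FakeUnaryUnaryCall, so no network traffic is needed.
    client = BigtableInstanceAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(type(client.transport.update_instance), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            instance.Instance(name="name_value")
        )
        response = await client.update_instance(instance.Instance(name="name_value"))

    assert response.name == "name_value"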
- with mock.patch.object( - type(client.transport.partial_update_instance), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/spam") - response = client.partial_update_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.PartialUpdateInstanceRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_partial_update_instance_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_instance_admin.PartialUpdateInstanceRequest() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.partial_update_instance), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.partial_update_instance(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.PartialUpdateInstanceRequest() - - -def test_partial_update_instance_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.partial_update_instance - in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.partial_update_instance - ] = mock_rpc - request = {} - client.partial_update_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods call wrapper_fn to build a cached - # client._transport.operations_client instance on first rpc call. 
- # Subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.partial_update_instance(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_partial_update_instance_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.partial_update_instance - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.partial_update_instance - ] = mock_rpc - - request = {} - await client.partial_update_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods call wrapper_fn to build a cached - # client._transport.operations_client instance on first rpc call. - # Subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - await client.partial_update_instance(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_partial_update_instance_async( - transport: str = "grpc_asyncio", - request_type=bigtable_instance_admin.PartialUpdateInstanceRequest, -): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.partial_update_instance), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - response = await client.partial_update_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.PartialUpdateInstanceRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_partial_update_instance_async_from_dict(): - await test_partial_update_instance_async(request_type=dict) - - -def test_partial_update_instance_field_headers(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = bigtable_instance_admin.PartialUpdateInstanceRequest() - - request.instance.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.partial_update_instance), "__call__" - ) as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.partial_update_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "instance.name=name_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_partial_update_instance_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.PartialUpdateInstanceRequest() - - request.instance.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.partial_update_instance), "__call__" - ) as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/op") - ) - await client.partial_update_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "instance.name=name_value", - ) in kw["metadata"] - - -def test_partial_update_instance_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.partial_update_instance), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/op") - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.partial_update_instance( - instance=gba_instance.Instance(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].instance - mock_val = gba_instance.Instance(name="name_value") - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) - assert arg == mock_val - - -def test_partial_update_instance_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
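PartialUpdateInstance is the one instance RPC that routes on a nested field, instance.name, and carries the change set in a protobuf FieldMask. The sketch below mirrors that behavior in isolation; the test name and the display_name mask path are example values chosen here, and the usual installed-package assumptions apply.

from unittest import mock

from google.auth import credentials as ga_credentials
from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
    BigtableInstanceAdminClient,
)
from google.cloud.bigtable_admin_v2.types import instance as gba_instance
from google.longrunning import operations_pb2
from google.protobuf import field_mask_pb2


def test_partial_update_routing_sketch():
    # Illustrative only: the routing header is keyed on the nested instance.name,
    # and the flattened update_mask lands on the outgoing request.
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(
        type(client.transport.partial_update_instance), "__call__"
    ) as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        client.partial_update_instance(
            instance=gba_instance.Instance(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
        )

    _, args, kw = call.mock_calls[0]
    assert args[0].update_mask.paths == ["display_name"]
    assert ("x-goog-request-params", "instance.name=name_value") in kw["metadata"]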
- with pytest.raises(ValueError): - client.partial_update_instance( - bigtable_instance_admin.PartialUpdateInstanceRequest(), - instance=gba_instance.Instance(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - -@pytest.mark.asyncio -async def test_partial_update_instance_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.partial_update_instance), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/op") - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.partial_update_instance( - instance=gba_instance.Instance(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].instance - mock_val = gba_instance.Instance(name="name_value") - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_partial_update_instance_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.partial_update_instance( - bigtable_instance_admin.PartialUpdateInstanceRequest(), - instance=gba_instance.Instance(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.DeleteInstanceRequest, - dict, - ], -) -def test_delete_instance(request_type, transport: str = "grpc"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.delete_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.DeleteInstanceRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert response is None - - -def test_delete_instance_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. 
- client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_instance_admin.DeleteInstanceRequest( - name="name_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.delete_instance(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.DeleteInstanceRequest( - name="name_value", - ) - - -def test_delete_instance_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.delete_instance in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.delete_instance] = mock_rpc - request = {} - client.delete_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.delete_instance(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_delete_instance_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.delete_instance - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.delete_instance - ] = mock_rpc - - request = {} - await client.delete_instance(request) - - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 - - await client.delete_instance(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_delete_instance_async( - transport: str = "grpc_asyncio", - request_type=bigtable_instance_admin.DeleteInstanceRequest, -): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.delete_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.DeleteInstanceRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_delete_instance_async_from_dict(): - await test_delete_instance_async(request_type=dict) - - -def test_delete_instance_field_headers(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.DeleteInstanceRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: - call.return_value = None - client.delete_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_delete_instance_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.DeleteInstanceRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -def test_delete_instance_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
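DeleteInstance is declared to return google.protobuf.Empty, and the generated surface exposes that as a None return value, which is what the assertions above check. A minimal sketch of the same behavior, illustrative only, with an invented test name and the same package assumptions:

from unittest import mock

from google.auth import credentials as ga_credentials
from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
    BigtableInstanceAdminClient,
)


def test_delete_returns_none_sketch():
    # Illustrative only: Empty-returning RPCs come back as None from the client,
    # while the flattened name still reaches the stub as part of the request.
    client = BigtableInstanceAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(type(client.transport.delete_instance), "__call__") as call:
        call.return_value = None
        assert client.delete_instance(name="name_value") is None
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"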
- with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_instance( - name="name_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - - -def test_delete_instance_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_instance( - bigtable_instance_admin.DeleteInstanceRequest(), - name="name_value", - ) - - -@pytest.mark.asyncio -async def test_delete_instance_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = None - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_instance( - name="name_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_instance_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_instance( - bigtable_instance_admin.DeleteInstanceRequest(), - name="name_value", - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.CreateClusterRequest, - dict, - ], -) -def test_create_cluster(request_type, transport: str = "grpc"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/spam") - response = client.create_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.CreateClusterRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -def test_create_cluster_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_instance_admin.CreateClusterRequest( - parent="parent_value", - cluster_id="cluster_id_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.create_cluster(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.CreateClusterRequest( - parent="parent_value", - cluster_id="cluster_id_value", - ) - - -def test_create_cluster_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.create_cluster in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.create_cluster] = mock_rpc - request = {} - client.create_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods call wrapper_fn to build a cached - # client._transport.operations_client instance on first rpc call. 
- # Subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.create_cluster(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_create_cluster_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.create_cluster - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.create_cluster - ] = mock_rpc - - request = {} - await client.create_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods call wrapper_fn to build a cached - # client._transport.operations_client instance on first rpc call. - # Subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - await client.create_cluster(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_create_cluster_async( - transport: str = "grpc_asyncio", - request_type=bigtable_instance_admin.CreateClusterRequest, -): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - response = await client.create_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.CreateClusterRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_cluster_async_from_dict(): - await test_create_cluster_async(request_type=dict) - - -def test_create_cluster_field_headers(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.CreateClusterRequest() - - request.parent = "parent_value" - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.create_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_create_cluster_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.CreateClusterRequest() - - request.parent = "parent_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/op") - ) - await client.create_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] - - -def test_create_cluster_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/op") - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_cluster( - parent="parent_value", - cluster_id="cluster_id_value", - cluster=instance.Cluster(name="name_value"), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - arg = args[0].cluster_id - mock_val = "cluster_id_value" - assert arg == mock_val - arg = args[0].cluster - mock_val = instance.Cluster(name="name_value") - assert arg == mock_val - - -def test_create_cluster_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_cluster( - bigtable_instance_admin.CreateClusterRequest(), - parent="parent_value", - cluster_id="cluster_id_value", - cluster=instance.Cluster(name="name_value"), - ) - - -@pytest.mark.asyncio -async def test_create_cluster_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name="operations/op") - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_cluster( - parent="parent_value", - cluster_id="cluster_id_value", - cluster=instance.Cluster(name="name_value"), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - arg = args[0].cluster_id - mock_val = "cluster_id_value" - assert arg == mock_val - arg = args[0].cluster - mock_val = instance.Cluster(name="name_value") - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_cluster_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_cluster( - bigtable_instance_admin.CreateClusterRequest(), - parent="parent_value", - cluster_id="cluster_id_value", - cluster=instance.Cluster(name="name_value"), - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.GetClusterRequest, - dict, - ], -) -def test_get_cluster(request_type, transport: str = "grpc"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = instance.Cluster( - name="name_value", - location="location_value", - state=instance.Cluster.State.READY, - serve_nodes=1181, - node_scaling_factor=instance.Cluster.NodeScalingFactor.NODE_SCALING_FACTOR_1X, - default_storage_type=common.StorageType.SSD, - ) - response = client.get_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.GetClusterRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, instance.Cluster) - assert response.name == "name_value" - assert response.location == "location_value" - assert response.state == instance.Cluster.State.READY - assert response.serve_nodes == 1181 - assert ( - response.node_scaling_factor - == instance.Cluster.NodeScalingFactor.NODE_SCALING_FACTOR_1X - ) - assert response.default_storage_type == common.StorageType.SSD - - -def test_get_cluster_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. 
- client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_instance_admin.GetClusterRequest( - name="name_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.get_cluster(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.GetClusterRequest( - name="name_value", - ) - - -def test_get_cluster_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.get_cluster in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.get_cluster] = mock_rpc - request = {} - client.get_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.get_cluster(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_get_cluster_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.get_cluster - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.get_cluster - ] = mock_rpc - - request = {} - await client.get_cluster(request) - - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 - - await client.get_cluster(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_get_cluster_async( - transport: str = "grpc_asyncio", - request_type=bigtable_instance_admin.GetClusterRequest, -): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - instance.Cluster( - name="name_value", - location="location_value", - state=instance.Cluster.State.READY, - serve_nodes=1181, - node_scaling_factor=instance.Cluster.NodeScalingFactor.NODE_SCALING_FACTOR_1X, - default_storage_type=common.StorageType.SSD, - ) - ) - response = await client.get_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.GetClusterRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, instance.Cluster) - assert response.name == "name_value" - assert response.location == "location_value" - assert response.state == instance.Cluster.State.READY - assert response.serve_nodes == 1181 - assert ( - response.node_scaling_factor - == instance.Cluster.NodeScalingFactor.NODE_SCALING_FACTOR_1X - ) - assert response.default_storage_type == common.StorageType.SSD - - -@pytest.mark.asyncio -async def test_get_cluster_async_from_dict(): - await test_get_cluster_async(request_type=dict) - - -def test_get_cluster_field_headers(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.GetClusterRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: - call.return_value = instance.Cluster() - client.get_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_get_cluster_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.GetClusterRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.Cluster()) - await client.get_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -def test_get_cluster_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = instance.Cluster() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_cluster( - name="name_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - - -def test_get_cluster_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_cluster( - bigtable_instance_admin.GetClusterRequest(), - name="name_value", - ) - - -@pytest.mark.asyncio -async def test_get_cluster_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = instance.Cluster() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.Cluster()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_cluster( - name="name_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_cluster_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_cluster( - bigtable_instance_admin.GetClusterRequest(), - name="name_value", - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.ListClustersRequest, - dict, - ], -) -def test_list_clusters(request_type, transport: str = "grpc"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_instance_admin.ListClustersResponse( - failed_locations=["failed_locations_value"], - next_page_token="next_page_token_value", - ) - response = client.list_clusters(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.ListClustersRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert response.raw_page is response - assert isinstance(response, bigtable_instance_admin.ListClustersResponse) - assert response.failed_locations == ["failed_locations_value"] - assert response.next_page_token == "next_page_token_value" - - -def test_list_clusters_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_instance_admin.ListClustersRequest( - parent="parent_value", - page_token="page_token_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.list_clusters(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListClustersRequest( - parent="parent_value", - page_token="page_token_value", - ) - - -def test_list_clusters_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.list_clusters in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.list_clusters] = mock_rpc - request = {} - client.list_clusters(request) - - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 - - client.list_clusters(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_list_clusters_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.list_clusters - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.list_clusters - ] = mock_rpc - - request = {} - await client.list_clusters(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - await client.list_clusters(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_list_clusters_async( - transport: str = "grpc_asyncio", - request_type=bigtable_instance_admin.ListClustersRequest, -): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_instance_admin.ListClustersResponse( - failed_locations=["failed_locations_value"], - next_page_token="next_page_token_value", - ) - ) - response = await client.list_clusters(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.ListClustersRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, bigtable_instance_admin.ListClustersResponse) - assert response.failed_locations == ["failed_locations_value"] - assert response.next_page_token == "next_page_token_value" - - -@pytest.mark.asyncio -async def test_list_clusters_async_from_dict(): - await test_list_clusters_async(request_type=dict) - - -def test_list_clusters_field_headers(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.ListClustersRequest() - - request.parent = "parent_value" - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: - call.return_value = bigtable_instance_admin.ListClustersResponse() - client.list_clusters(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_list_clusters_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.ListClustersRequest() - - request.parent = "parent_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_instance_admin.ListClustersResponse() - ) - await client.list_clusters(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] - - -def test_list_clusters_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_instance_admin.ListClustersResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_clusters( - parent="parent_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - - -def test_list_clusters_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_clusters( - bigtable_instance_admin.ListClustersRequest(), - parent="parent_value", - ) - - -@pytest.mark.asyncio -async def test_list_clusters_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_instance_admin.ListClustersResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_instance_admin.ListClustersResponse() - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- response = await client.list_clusters( - parent="parent_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_clusters_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_clusters( - bigtable_instance_admin.ListClustersRequest(), - parent="parent_value", - ) - - -@pytest.mark.parametrize( - "request_type", - [ - instance.Cluster, - dict, - ], -) -def test_update_cluster(request_type, transport: str = "grpc"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_cluster), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/spam") - response = client.update_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = instance.Cluster() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_update_cluster_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = instance.Cluster( - name="name_value", - location="location_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_cluster), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. 
- ) - client.update_cluster(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == instance.Cluster( - name="name_value", - location="location_value", - ) - - -def test_update_cluster_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.update_cluster in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.update_cluster] = mock_rpc - request = {} - client.update_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods call wrapper_fn to build a cached - # client._transport.operations_client instance on first rpc call. - # Subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.update_cluster(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_update_cluster_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.update_cluster - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.update_cluster - ] = mock_rpc - - request = {} - await client.update_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods call wrapper_fn to build a cached - # client._transport.operations_client instance on first rpc call. - # Subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - await client.update_cluster(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_update_cluster_async( - transport: str = "grpc_asyncio", request_type=instance.Cluster -): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(type(client.transport.update_cluster), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - response = await client.update_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = instance.Cluster() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_update_cluster_async_from_dict(): - await test_update_cluster_async(request_type=dict) - - -def test_update_cluster_field_headers(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = instance.Cluster() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_cluster), "__call__") as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.update_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_update_cluster_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = instance.Cluster() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_cluster), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/op") - ) - await client.update_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.PartialUpdateClusterRequest, - dict, - ], -) -def test_partial_update_cluster(request_type, transport: str = "grpc"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.partial_update_cluster), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/spam") - response = client.partial_update_cluster(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.PartialUpdateClusterRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_partial_update_cluster_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_instance_admin.PartialUpdateClusterRequest() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.partial_update_cluster), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.partial_update_cluster(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.PartialUpdateClusterRequest() - - -def test_partial_update_cluster_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.partial_update_cluster - in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.partial_update_cluster - ] = mock_rpc - request = {} - client.partial_update_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods call wrapper_fn to build a cached - # client._transport.operations_client instance on first rpc call. 
- # Subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.partial_update_cluster(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_partial_update_cluster_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.partial_update_cluster - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.partial_update_cluster - ] = mock_rpc - - request = {} - await client.partial_update_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods call wrapper_fn to build a cached - # client._transport.operations_client instance on first rpc call. - # Subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - await client.partial_update_cluster(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_partial_update_cluster_async( - transport: str = "grpc_asyncio", - request_type=bigtable_instance_admin.PartialUpdateClusterRequest, -): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.partial_update_cluster), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - response = await client.partial_update_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.PartialUpdateClusterRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_partial_update_cluster_async_from_dict(): - await test_partial_update_cluster_async(request_type=dict) - - -def test_partial_update_cluster_field_headers(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.PartialUpdateClusterRequest() - - request.cluster.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.partial_update_cluster), "__call__" - ) as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.partial_update_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "cluster.name=name_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_partial_update_cluster_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.PartialUpdateClusterRequest() - - request.cluster.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.partial_update_cluster), "__call__" - ) as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/op") - ) - await client.partial_update_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "cluster.name=name_value", - ) in kw["metadata"] - - -def test_partial_update_cluster_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.partial_update_cluster), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/op") - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.partial_update_cluster( - cluster=instance.Cluster(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].cluster - mock_val = instance.Cluster(name="name_value") - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) - assert arg == mock_val - - -def test_partial_update_cluster_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.partial_update_cluster( - bigtable_instance_admin.PartialUpdateClusterRequest(), - cluster=instance.Cluster(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - -@pytest.mark.asyncio -async def test_partial_update_cluster_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.partial_update_cluster), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/op") - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.partial_update_cluster( - cluster=instance.Cluster(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].cluster - mock_val = instance.Cluster(name="name_value") - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_partial_update_cluster_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.partial_update_cluster( - bigtable_instance_admin.PartialUpdateClusterRequest(), - cluster=instance.Cluster(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.DeleteClusterRequest, - dict, - ], -) -def test_delete_cluster(request_type, transport: str = "grpc"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.delete_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.DeleteClusterRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert response is None - - -def test_delete_cluster_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_instance_admin.DeleteClusterRequest( - name="name_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. 
- ) - client.delete_cluster(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.DeleteClusterRequest( - name="name_value", - ) - - -def test_delete_cluster_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.delete_cluster in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.delete_cluster] = mock_rpc - request = {} - client.delete_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.delete_cluster(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_delete_cluster_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.delete_cluster - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.delete_cluster - ] = mock_rpc - - request = {} - await client.delete_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - await client.delete_cluster(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_delete_cluster_async( - transport: str = "grpc_asyncio", - request_type=bigtable_instance_admin.DeleteClusterRequest, -): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.delete_cluster(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.DeleteClusterRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_delete_cluster_async_from_dict(): - await test_delete_cluster_async(request_type=dict) - - -def test_delete_cluster_field_headers(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.DeleteClusterRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: - call.return_value = None - client.delete_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_delete_cluster_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.DeleteClusterRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -def test_delete_cluster_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_cluster( - name="name_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - - -def test_delete_cluster_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.delete_cluster( - bigtable_instance_admin.DeleteClusterRequest(), - name="name_value", - ) - - -@pytest.mark.asyncio -async def test_delete_cluster_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = None - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_cluster( - name="name_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_cluster_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_cluster( - bigtable_instance_admin.DeleteClusterRequest(), - name="name_value", - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.CreateAppProfileRequest, - dict, - ], -) -def test_create_app_profile(request_type, transport: str = "grpc"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_app_profile), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = instance.AppProfile( - name="name_value", - etag="etag_value", - description="description_value", - priority=instance.AppProfile.Priority.PRIORITY_LOW, - ) - response = client.create_app_profile(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.CreateAppProfileRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, instance.AppProfile) - assert response.name == "name_value" - assert response.etag == "etag_value" - assert response.description == "description_value" - - -def test_create_app_profile_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. 
- request = bigtable_instance_admin.CreateAppProfileRequest( - parent="parent_value", - app_profile_id="app_profile_id_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_app_profile), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.create_app_profile(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.CreateAppProfileRequest( - parent="parent_value", - app_profile_id="app_profile_id_value", - ) - - -def test_create_app_profile_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.create_app_profile in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.create_app_profile - ] = mock_rpc - request = {} - client.create_app_profile(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.create_app_profile(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_create_app_profile_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.create_app_profile - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.create_app_profile - ] = mock_rpc - - request = {} - await client.create_app_profile(request) - - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 - - await client.create_app_profile(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_create_app_profile_async( - transport: str = "grpc_asyncio", - request_type=bigtable_instance_admin.CreateAppProfileRequest, -): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_app_profile), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - instance.AppProfile( - name="name_value", - etag="etag_value", - description="description_value", - ) - ) - response = await client.create_app_profile(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.CreateAppProfileRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, instance.AppProfile) - assert response.name == "name_value" - assert response.etag == "etag_value" - assert response.description == "description_value" - - -@pytest.mark.asyncio -async def test_create_app_profile_async_from_dict(): - await test_create_app_profile_async(request_type=dict) - - -def test_create_app_profile_field_headers(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.CreateAppProfileRequest() - - request.parent = "parent_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_app_profile), "__call__" - ) as call: - call.return_value = instance.AppProfile() - client.create_app_profile(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_create_app_profile_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.CreateAppProfileRequest() - - request.parent = "parent_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_app_profile), "__call__" - ) as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.AppProfile()) - await client.create_app_profile(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] - - -def test_create_app_profile_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_app_profile), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = instance.AppProfile() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_app_profile( - parent="parent_value", - app_profile_id="app_profile_id_value", - app_profile=instance.AppProfile(name="name_value"), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - arg = args[0].app_profile_id - mock_val = "app_profile_id_value" - assert arg == mock_val - arg = args[0].app_profile - mock_val = instance.AppProfile(name="name_value") - assert arg == mock_val - - -def test_create_app_profile_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_app_profile( - bigtable_instance_admin.CreateAppProfileRequest(), - parent="parent_value", - app_profile_id="app_profile_id_value", - app_profile=instance.AppProfile(name="name_value"), - ) - - -@pytest.mark.asyncio -async def test_create_app_profile_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_app_profile), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = instance.AppProfile() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.AppProfile()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_app_profile( - parent="parent_value", - app_profile_id="app_profile_id_value", - app_profile=instance.AppProfile(name="name_value"), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - arg = args[0].app_profile_id - mock_val = "app_profile_id_value" - assert arg == mock_val - arg = args[0].app_profile - mock_val = instance.AppProfile(name="name_value") - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_app_profile_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.create_app_profile( - bigtable_instance_admin.CreateAppProfileRequest(), - parent="parent_value", - app_profile_id="app_profile_id_value", - app_profile=instance.AppProfile(name="name_value"), - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.GetAppProfileRequest, - dict, - ], -) -def test_get_app_profile(request_type, transport: str = "grpc"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = instance.AppProfile( - name="name_value", - etag="etag_value", - description="description_value", - priority=instance.AppProfile.Priority.PRIORITY_LOW, - ) - response = client.get_app_profile(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.GetAppProfileRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, instance.AppProfile) - assert response.name == "name_value" - assert response.etag == "etag_value" - assert response.description == "description_value" - - -def test_get_app_profile_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_instance_admin.GetAppProfileRequest( - name="name_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.get_app_profile(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.GetAppProfileRequest( - name="name_value", - ) - - -def test_get_app_profile_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.get_app_profile in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. 
- ) - client._transport._wrapped_methods[client._transport.get_app_profile] = mock_rpc - request = {} - client.get_app_profile(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.get_app_profile(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_get_app_profile_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.get_app_profile - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.get_app_profile - ] = mock_rpc - - request = {} - await client.get_app_profile(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - await client.get_app_profile(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_get_app_profile_async( - transport: str = "grpc_asyncio", - request_type=bigtable_instance_admin.GetAppProfileRequest, -): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - instance.AppProfile( - name="name_value", - etag="etag_value", - description="description_value", - ) - ) - response = await client.get_app_profile(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.GetAppProfileRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, instance.AppProfile) - assert response.name == "name_value" - assert response.etag == "etag_value" - assert response.description == "description_value" - - -@pytest.mark.asyncio -async def test_get_app_profile_async_from_dict(): - await test_get_app_profile_async(request_type=dict) - - -def test_get_app_profile_field_headers(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = bigtable_instance_admin.GetAppProfileRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call: - call.return_value = instance.AppProfile() - client.get_app_profile(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_get_app_profile_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.GetAppProfileRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.AppProfile()) - await client.get_app_profile(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -def test_get_app_profile_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = instance.AppProfile() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_app_profile( - name="name_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - - -def test_get_app_profile_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_app_profile( - bigtable_instance_admin.GetAppProfileRequest(), - name="name_value", - ) - - -@pytest.mark.asyncio -async def test_get_app_profile_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = instance.AppProfile() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.AppProfile()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- response = await client.get_app_profile( - name="name_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_app_profile_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_app_profile( - bigtable_instance_admin.GetAppProfileRequest(), - name="name_value", - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.ListAppProfilesRequest, - dict, - ], -) -def test_list_app_profiles(request_type, transport: str = "grpc"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_app_profiles), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_instance_admin.ListAppProfilesResponse( - next_page_token="next_page_token_value", - failed_locations=["failed_locations_value"], - ) - response = client.list_app_profiles(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.ListAppProfilesRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListAppProfilesPager) - assert response.next_page_token == "next_page_token_value" - assert response.failed_locations == ["failed_locations_value"] - - -def test_list_app_profiles_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_instance_admin.ListAppProfilesRequest( - parent="parent_value", - page_token="page_token_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_app_profiles), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. 
- ) - client.list_app_profiles(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListAppProfilesRequest( - parent="parent_value", - page_token="page_token_value", - ) - - -def test_list_app_profiles_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.list_app_profiles in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.list_app_profiles - ] = mock_rpc - request = {} - client.list_app_profiles(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.list_app_profiles(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_list_app_profiles_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.list_app_profiles - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.list_app_profiles - ] = mock_rpc - - request = {} - await client.list_app_profiles(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - await client.list_app_profiles(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_list_app_profiles_async( - transport: str = "grpc_asyncio", - request_type=bigtable_instance_admin.ListAppProfilesRequest, -): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_app_profiles), "__call__" - ) as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_instance_admin.ListAppProfilesResponse( - next_page_token="next_page_token_value", - failed_locations=["failed_locations_value"], - ) - ) - response = await client.list_app_profiles(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.ListAppProfilesRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListAppProfilesAsyncPager) - assert response.next_page_token == "next_page_token_value" - assert response.failed_locations == ["failed_locations_value"] - - -@pytest.mark.asyncio -async def test_list_app_profiles_async_from_dict(): - await test_list_app_profiles_async(request_type=dict) - - -def test_list_app_profiles_field_headers(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.ListAppProfilesRequest() - - request.parent = "parent_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_app_profiles), "__call__" - ) as call: - call.return_value = bigtable_instance_admin.ListAppProfilesResponse() - client.list_app_profiles(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_list_app_profiles_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.ListAppProfilesRequest() - - request.parent = "parent_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_app_profiles), "__call__" - ) as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_instance_admin.ListAppProfilesResponse() - ) - await client.list_app_profiles(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] - - -def test_list_app_profiles_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_app_profiles), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_instance_admin.ListAppProfilesResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- client.list_app_profiles( - parent="parent_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - - -def test_list_app_profiles_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_app_profiles( - bigtable_instance_admin.ListAppProfilesRequest(), - parent="parent_value", - ) - - -@pytest.mark.asyncio -async def test_list_app_profiles_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_app_profiles), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_instance_admin.ListAppProfilesResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_instance_admin.ListAppProfilesResponse() - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_app_profiles( - parent="parent_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_app_profiles_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_app_profiles( - bigtable_instance_admin.ListAppProfilesRequest(), - parent="parent_value", - ) - - -def test_list_app_profiles_pager(transport_name: str = "grpc"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport_name, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_app_profiles), "__call__" - ) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[ - instance.AppProfile(), - instance.AppProfile(), - instance.AppProfile(), - ], - next_page_token="abc", - ), - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[], - next_page_token="def", - ), - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[ - instance.AppProfile(), - ], - next_page_token="ghi", - ), - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[ - instance.AppProfile(), - instance.AppProfile(), - ], - ), - RuntimeError, - ) - - expected_metadata = () - retry = retries.Retry() - timeout = 5 - expected_metadata = tuple(expected_metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), - ) - pager = client.list_app_profiles(request={}, retry=retry, timeout=timeout) - - assert pager._metadata == expected_metadata - assert pager._retry == retry - assert pager._timeout == timeout - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, instance.AppProfile) for i in results) - - -def test_list_app_profiles_pages(transport_name: str = "grpc"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport_name, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_app_profiles), "__call__" - ) as call: - # Set the response to a series of pages. - call.side_effect = ( - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[ - instance.AppProfile(), - instance.AppProfile(), - instance.AppProfile(), - ], - next_page_token="abc", - ), - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[], - next_page_token="def", - ), - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[ - instance.AppProfile(), - ], - next_page_token="ghi", - ), - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[ - instance.AppProfile(), - instance.AppProfile(), - ], - ), - RuntimeError, - ) - pages = list(client.list_app_profiles(request={}).pages) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token - - -@pytest.mark.asyncio -async def test_list_app_profiles_async_pager(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_app_profiles), - "__call__", - new_callable=mock.AsyncMock, - ) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[ - instance.AppProfile(), - instance.AppProfile(), - instance.AppProfile(), - ], - next_page_token="abc", - ), - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[], - next_page_token="def", - ), - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[ - instance.AppProfile(), - ], - next_page_token="ghi", - ), - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[ - instance.AppProfile(), - instance.AppProfile(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_app_profiles( - request={}, - ) - assert async_pager.next_page_token == "abc" - responses = [] - async for response in async_pager: # pragma: no branch - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, instance.AppProfile) for i in responses) - - -@pytest.mark.asyncio -async def test_list_app_profiles_async_pages(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_app_profiles), - "__call__", - new_callable=mock.AsyncMock, - ) as call: - # Set the response to a series of pages. - call.side_effect = ( - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[ - instance.AppProfile(), - instance.AppProfile(), - instance.AppProfile(), - ], - next_page_token="abc", - ), - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[], - next_page_token="def", - ), - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[ - instance.AppProfile(), - ], - next_page_token="ghi", - ), - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[ - instance.AppProfile(), - instance.AppProfile(), - ], - ), - RuntimeError, - ) - pages = [] - # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` - # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 - async for page_ in ( # pragma: no branch - await client.list_app_profiles(request={}) - ).pages: - pages.append(page_) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.UpdateAppProfileRequest, - dict, - ], -) -def test_update_app_profile(request_type, transport: str = "grpc"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_app_profile), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/spam") - response = client.update_app_profile(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.UpdateAppProfileRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -def test_update_app_profile_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_instance_admin.UpdateAppProfileRequest() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_app_profile), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.update_app_profile(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.UpdateAppProfileRequest() - - -def test_update_app_profile_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.update_app_profile in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.update_app_profile - ] = mock_rpc - request = {} - client.update_app_profile(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods call wrapper_fn to build a cached - # client._transport.operations_client instance on first rpc call. 
- # Subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.update_app_profile(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_update_app_profile_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.update_app_profile - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.update_app_profile - ] = mock_rpc - - request = {} - await client.update_app_profile(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods call wrapper_fn to build a cached - # client._transport.operations_client instance on first rpc call. - # Subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - await client.update_app_profile(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_update_app_profile_async( - transport: str = "grpc_asyncio", - request_type=bigtable_instance_admin.UpdateAppProfileRequest, -): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_app_profile), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - response = await client.update_app_profile(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.UpdateAppProfileRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_update_app_profile_async_from_dict(): - await test_update_app_profile_async(request_type=dict) - - -def test_update_app_profile_field_headers(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.UpdateAppProfileRequest() - - request.app_profile.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.update_app_profile), "__call__" - ) as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.update_app_profile(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "app_profile.name=name_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_update_app_profile_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.UpdateAppProfileRequest() - - request.app_profile.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_app_profile), "__call__" - ) as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/op") - ) - await client.update_app_profile(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "app_profile.name=name_value", - ) in kw["metadata"] - - -def test_update_app_profile_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_app_profile), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/op") - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_app_profile( - app_profile=instance.AppProfile(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].app_profile - mock_val = instance.AppProfile(name="name_value") - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) - assert arg == mock_val - - -def test_update_app_profile_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_app_profile( - bigtable_instance_admin.UpdateAppProfileRequest(), - app_profile=instance.AppProfile(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - -@pytest.mark.asyncio -async def test_update_app_profile_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_app_profile), "__call__" - ) as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name="operations/op") - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_app_profile( - app_profile=instance.AppProfile(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].app_profile - mock_val = instance.AppProfile(name="name_value") - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_update_app_profile_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.update_app_profile( - bigtable_instance_admin.UpdateAppProfileRequest(), - app_profile=instance.AppProfile(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.DeleteAppProfileRequest, - dict, - ], -) -def test_delete_app_profile(request_type, transport: str = "grpc"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_app_profile), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.delete_app_profile(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.DeleteAppProfileRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert response is None - - -def test_delete_app_profile_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_instance_admin.DeleteAppProfileRequest( - name="name_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_app_profile), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. 
- ) - client.delete_app_profile(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.DeleteAppProfileRequest( - name="name_value", - ) - - -def test_delete_app_profile_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.delete_app_profile in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.delete_app_profile - ] = mock_rpc - request = {} - client.delete_app_profile(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.delete_app_profile(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_delete_app_profile_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.delete_app_profile - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.delete_app_profile - ] = mock_rpc - - request = {} - await client.delete_app_profile(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - await client.delete_app_profile(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_delete_app_profile_async( - transport: str = "grpc_asyncio", - request_type=bigtable_instance_admin.DeleteAppProfileRequest, -): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_app_profile), "__call__" - ) as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.delete_app_profile(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.DeleteAppProfileRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_delete_app_profile_async_from_dict(): - await test_delete_app_profile_async(request_type=dict) - - -def test_delete_app_profile_field_headers(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.DeleteAppProfileRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_app_profile), "__call__" - ) as call: - call.return_value = None - client.delete_app_profile(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_delete_app_profile_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.DeleteAppProfileRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_app_profile), "__call__" - ) as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_app_profile(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -def test_delete_app_profile_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_app_profile), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_app_profile( - name="name_value", - ignore_warnings=True, - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - arg = args[0].ignore_warnings - mock_val = True - assert arg == mock_val - - -def test_delete_app_profile_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_app_profile( - bigtable_instance_admin.DeleteAppProfileRequest(), - name="name_value", - ignore_warnings=True, - ) - - -@pytest.mark.asyncio -async def test_delete_app_profile_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_app_profile), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = None - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_app_profile( - name="name_value", - ignore_warnings=True, - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - arg = args[0].ignore_warnings - mock_val = True - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_app_profile_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_app_profile( - bigtable_instance_admin.DeleteAppProfileRequest(), - name="name_value", - ignore_warnings=True, - ) - - -@pytest.mark.parametrize( - "request_type", - [ - iam_policy_pb2.GetIamPolicyRequest, - dict, - ], -) -def test_get_iam_policy(request_type, transport: str = "grpc"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = policy_pb2.Policy( - version=774, - etag=b"etag_blob", - ) - response = client.get_iam_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = iam_policy_pb2.GetIamPolicyRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, policy_pb2.Policy) - assert response.version == 774 - assert response.etag == b"etag_blob" - - -def test_get_iam_policy_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. 
- client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = iam_policy_pb2.GetIamPolicyRequest( - resource="resource_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.get_iam_policy(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.GetIamPolicyRequest( - resource="resource_value", - ) - - -def test_get_iam_policy_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.get_iam_policy in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.get_iam_policy] = mock_rpc - request = {} - client.get_iam_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.get_iam_policy(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_get_iam_policy_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.get_iam_policy - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.get_iam_policy - ] = mock_rpc - - request = {} - await client.get_iam_policy(request) - - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 - - await client.get_iam_policy(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_get_iam_policy_async( - transport: str = "grpc_asyncio", request_type=iam_policy_pb2.GetIamPolicyRequest -): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - policy_pb2.Policy( - version=774, - etag=b"etag_blob", - ) - ) - response = await client.get_iam_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = iam_policy_pb2.GetIamPolicyRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, policy_pb2.Policy) - assert response.version == 774 - assert response.etag == b"etag_blob" - - -@pytest.mark.asyncio -async def test_get_iam_policy_async_from_dict(): - await test_get_iam_policy_async(request_type=dict) - - -def test_get_iam_policy_field_headers(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = iam_policy_pb2.GetIamPolicyRequest() - - request.resource = "resource_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: - call.return_value = policy_pb2.Policy() - client.get_iam_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "resource=resource_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_get_iam_policy_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = iam_policy_pb2.GetIamPolicyRequest() - - request.resource = "resource_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) - await client.get_iam_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "resource=resource_value", - ) in kw["metadata"] - - -def test_get_iam_policy_from_dict_foreign(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = policy_pb2.Policy() - response = client.get_iam_policy( - request={ - "resource": "resource_value", - "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), - } - ) - call.assert_called() - - -def test_get_iam_policy_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = policy_pb2.Policy() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_iam_policy( - resource="resource_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].resource - mock_val = "resource_value" - assert arg == mock_val - - -def test_get_iam_policy_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_iam_policy( - iam_policy_pb2.GetIamPolicyRequest(), - resource="resource_value", - ) - - -@pytest.mark.asyncio -async def test_get_iam_policy_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = policy_pb2.Policy() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_iam_policy( - resource="resource_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].resource - mock_val = "resource_value" - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_iam_policy_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.get_iam_policy( - iam_policy_pb2.GetIamPolicyRequest(), - resource="resource_value", - ) - - -@pytest.mark.parametrize( - "request_type", - [ - iam_policy_pb2.SetIamPolicyRequest, - dict, - ], -) -def test_set_iam_policy(request_type, transport: str = "grpc"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = policy_pb2.Policy( - version=774, - etag=b"etag_blob", - ) - response = client.set_iam_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = iam_policy_pb2.SetIamPolicyRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, policy_pb2.Policy) - assert response.version == 774 - assert response.etag == b"etag_blob" - - -def test_set_iam_policy_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = iam_policy_pb2.SetIamPolicyRequest( - resource="resource_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.set_iam_policy(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.SetIamPolicyRequest( - resource="resource_value", - ) - - -def test_set_iam_policy_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.set_iam_policy in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.set_iam_policy] = mock_rpc - request = {} - client.set_iam_policy(request) - - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 - - client.set_iam_policy(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_set_iam_policy_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.set_iam_policy - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.set_iam_policy - ] = mock_rpc - - request = {} - await client.set_iam_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - await client.set_iam_policy(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_set_iam_policy_async( - transport: str = "grpc_asyncio", request_type=iam_policy_pb2.SetIamPolicyRequest -): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - policy_pb2.Policy( - version=774, - etag=b"etag_blob", - ) - ) - response = await client.set_iam_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = iam_policy_pb2.SetIamPolicyRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, policy_pb2.Policy) - assert response.version == 774 - assert response.etag == b"etag_blob" - - -@pytest.mark.asyncio -async def test_set_iam_policy_async_from_dict(): - await test_set_iam_policy_async(request_type=dict) - - -def test_set_iam_policy_field_headers(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = iam_policy_pb2.SetIamPolicyRequest() - - request.resource = "resource_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: - call.return_value = policy_pb2.Policy() - client.set_iam_policy(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "resource=resource_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_set_iam_policy_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = iam_policy_pb2.SetIamPolicyRequest() - - request.resource = "resource_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) - await client.set_iam_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "resource=resource_value", - ) in kw["metadata"] - - -def test_set_iam_policy_from_dict_foreign(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = policy_pb2.Policy() - response = client.set_iam_policy( - request={ - "resource": "resource_value", - "policy": policy_pb2.Policy(version=774), - "update_mask": field_mask_pb2.FieldMask(paths=["paths_value"]), - } - ) - call.assert_called() - - -def test_set_iam_policy_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = policy_pb2.Policy() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.set_iam_policy( - resource="resource_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].resource - mock_val = "resource_value" - assert arg == mock_val - - -def test_set_iam_policy_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.set_iam_policy( - iam_policy_pb2.SetIamPolicyRequest(), - resource="resource_value", - ) - - -@pytest.mark.asyncio -async def test_set_iam_policy_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: - # Designate an appropriate return value for the call. 
- call.return_value = policy_pb2.Policy() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.set_iam_policy( - resource="resource_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].resource - mock_val = "resource_value" - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_set_iam_policy_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.set_iam_policy( - iam_policy_pb2.SetIamPolicyRequest(), - resource="resource_value", - ) - - -@pytest.mark.parametrize( - "request_type", - [ - iam_policy_pb2.TestIamPermissionsRequest, - dict, - ], -) -def test_test_iam_permissions(request_type, transport: str = "grpc"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = iam_policy_pb2.TestIamPermissionsResponse( - permissions=["permissions_value"], - ) - response = client.test_iam_permissions(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = iam_policy_pb2.TestIamPermissionsRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) - assert response.permissions == ["permissions_value"] - - -def test_test_iam_permissions_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = iam_policy_pb2.TestIamPermissionsRequest( - resource="resource_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. 
- ) - client.test_iam_permissions(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.TestIamPermissionsRequest( - resource="resource_value", - ) - - -def test_test_iam_permissions_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.test_iam_permissions in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.test_iam_permissions - ] = mock_rpc - request = {} - client.test_iam_permissions(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.test_iam_permissions(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_test_iam_permissions_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.test_iam_permissions - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.test_iam_permissions - ] = mock_rpc - - request = {} - await client.test_iam_permissions(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - await client.test_iam_permissions(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_test_iam_permissions_async( - transport: str = "grpc_asyncio", - request_type=iam_policy_pb2.TestIamPermissionsRequest, -): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), "__call__" - ) as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - iam_policy_pb2.TestIamPermissionsResponse( - permissions=["permissions_value"], - ) - ) - response = await client.test_iam_permissions(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = iam_policy_pb2.TestIamPermissionsRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) - assert response.permissions == ["permissions_value"] - - -@pytest.mark.asyncio -async def test_test_iam_permissions_async_from_dict(): - await test_test_iam_permissions_async(request_type=dict) - - -def test_test_iam_permissions_field_headers(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = iam_policy_pb2.TestIamPermissionsRequest() - - request.resource = "resource_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), "__call__" - ) as call: - call.return_value = iam_policy_pb2.TestIamPermissionsResponse() - client.test_iam_permissions(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "resource=resource_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_test_iam_permissions_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = iam_policy_pb2.TestIamPermissionsRequest() - - request.resource = "resource_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), "__call__" - ) as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - iam_policy_pb2.TestIamPermissionsResponse() - ) - await client.test_iam_permissions(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "resource=resource_value", - ) in kw["metadata"] - - -def test_test_iam_permissions_from_dict_foreign(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), "__call__" - ) as call: - # Designate an appropriate return value for the call. 
- call.return_value = iam_policy_pb2.TestIamPermissionsResponse() - response = client.test_iam_permissions( - request={ - "resource": "resource_value", - "permissions": ["permissions_value"], - } - ) - call.assert_called() - - -def test_test_iam_permissions_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = iam_policy_pb2.TestIamPermissionsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.test_iam_permissions( - resource="resource_value", - permissions=["permissions_value"], - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].resource - mock_val = "resource_value" - assert arg == mock_val - arg = args[0].permissions - mock_val = ["permissions_value"] - assert arg == mock_val - - -def test_test_iam_permissions_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.test_iam_permissions( - iam_policy_pb2.TestIamPermissionsRequest(), - resource="resource_value", - permissions=["permissions_value"], - ) - - -@pytest.mark.asyncio -async def test_test_iam_permissions_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = iam_policy_pb2.TestIamPermissionsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - iam_policy_pb2.TestIamPermissionsResponse() - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.test_iam_permissions( - resource="resource_value", - permissions=["permissions_value"], - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].resource - mock_val = "resource_value" - assert arg == mock_val - arg = args[0].permissions - mock_val = ["permissions_value"] - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_test_iam_permissions_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.test_iam_permissions( - iam_policy_pb2.TestIamPermissionsRequest(), - resource="resource_value", - permissions=["permissions_value"], - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.ListHotTabletsRequest, - dict, - ], -) -def test_list_hot_tablets(request_type, transport: str = "grpc"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_hot_tablets), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_instance_admin.ListHotTabletsResponse( - next_page_token="next_page_token_value", - ) - response = client.list_hot_tablets(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.ListHotTabletsRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListHotTabletsPager) - assert response.next_page_token == "next_page_token_value" - - -def test_list_hot_tablets_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_instance_admin.ListHotTabletsRequest( - parent="parent_value", - page_token="page_token_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_hot_tablets), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.list_hot_tablets(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListHotTabletsRequest( - parent="parent_value", - page_token="page_token_value", - ) - - -def test_list_hot_tablets_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.list_hot_tablets in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. 
- ) - client._transport._wrapped_methods[ - client._transport.list_hot_tablets - ] = mock_rpc - request = {} - client.list_hot_tablets(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.list_hot_tablets(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_list_hot_tablets_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.list_hot_tablets - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.list_hot_tablets - ] = mock_rpc - - request = {} - await client.list_hot_tablets(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - await client.list_hot_tablets(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_list_hot_tablets_async( - transport: str = "grpc_asyncio", - request_type=bigtable_instance_admin.ListHotTabletsRequest, -): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_hot_tablets), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_instance_admin.ListHotTabletsResponse( - next_page_token="next_page_token_value", - ) - ) - response = await client.list_hot_tablets(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.ListHotTabletsRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListHotTabletsAsyncPager) - assert response.next_page_token == "next_page_token_value" - - -@pytest.mark.asyncio -async def test_list_hot_tablets_async_from_dict(): - await test_list_hot_tablets_async(request_type=dict) - - -def test_list_hot_tablets_field_headers(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.ListHotTabletsRequest() - - request.parent = "parent_value" - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(type(client.transport.list_hot_tablets), "__call__") as call: - call.return_value = bigtable_instance_admin.ListHotTabletsResponse() - client.list_hot_tablets(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_list_hot_tablets_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.ListHotTabletsRequest() - - request.parent = "parent_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_hot_tablets), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_instance_admin.ListHotTabletsResponse() - ) - await client.list_hot_tablets(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] - - -def test_list_hot_tablets_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_hot_tablets), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_instance_admin.ListHotTabletsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_hot_tablets( - parent="parent_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - - -def test_list_hot_tablets_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_hot_tablets( - bigtable_instance_admin.ListHotTabletsRequest(), - parent="parent_value", - ) - - -@pytest.mark.asyncio -async def test_list_hot_tablets_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_hot_tablets), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_instance_admin.ListHotTabletsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_instance_admin.ListHotTabletsResponse() - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- response = await client.list_hot_tablets( - parent="parent_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_hot_tablets_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_hot_tablets( - bigtable_instance_admin.ListHotTabletsRequest(), - parent="parent_value", - ) - - -def test_list_hot_tablets_pager(transport_name: str = "grpc"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport_name, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_hot_tablets), "__call__") as call: - # Set the response to a series of pages. - call.side_effect = ( - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[ - instance.HotTablet(), - instance.HotTablet(), - instance.HotTablet(), - ], - next_page_token="abc", - ), - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[], - next_page_token="def", - ), - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[ - instance.HotTablet(), - ], - next_page_token="ghi", - ), - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[ - instance.HotTablet(), - instance.HotTablet(), - ], - ), - RuntimeError, - ) - - expected_metadata = () - retry = retries.Retry() - timeout = 5 - expected_metadata = tuple(expected_metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), - ) - pager = client.list_hot_tablets(request={}, retry=retry, timeout=timeout) - - assert pager._metadata == expected_metadata - assert pager._retry == retry - assert pager._timeout == timeout - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, instance.HotTablet) for i in results) - - -def test_list_hot_tablets_pages(transport_name: str = "grpc"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport_name, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_hot_tablets), "__call__") as call: - # Set the response to a series of pages. - call.side_effect = ( - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[ - instance.HotTablet(), - instance.HotTablet(), - instance.HotTablet(), - ], - next_page_token="abc", - ), - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[], - next_page_token="def", - ), - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[ - instance.HotTablet(), - ], - next_page_token="ghi", - ), - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[ - instance.HotTablet(), - instance.HotTablet(), - ], - ), - RuntimeError, - ) - pages = list(client.list_hot_tablets(request={}).pages) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token - - -@pytest.mark.asyncio -async def test_list_hot_tablets_async_pager(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_hot_tablets), "__call__", new_callable=mock.AsyncMock - ) as call: - # Set the response to a series of pages. - call.side_effect = ( - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[ - instance.HotTablet(), - instance.HotTablet(), - instance.HotTablet(), - ], - next_page_token="abc", - ), - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[], - next_page_token="def", - ), - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[ - instance.HotTablet(), - ], - next_page_token="ghi", - ), - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[ - instance.HotTablet(), - instance.HotTablet(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_hot_tablets( - request={}, - ) - assert async_pager.next_page_token == "abc" - responses = [] - async for response in async_pager: # pragma: no branch - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, instance.HotTablet) for i in responses) - - -@pytest.mark.asyncio -async def test_list_hot_tablets_async_pages(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_hot_tablets), "__call__", new_callable=mock.AsyncMock - ) as call: - # Set the response to a series of pages. - call.side_effect = ( - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[ - instance.HotTablet(), - instance.HotTablet(), - instance.HotTablet(), - ], - next_page_token="abc", - ), - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[], - next_page_token="def", - ), - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[ - instance.HotTablet(), - ], - next_page_token="ghi", - ), - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[ - instance.HotTablet(), - instance.HotTablet(), - ], - ), - RuntimeError, - ) - pages = [] - # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` - # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 - async for page_ in ( # pragma: no branch - await client.list_hot_tablets(request={}) - ).pages: - pages.append(page_) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.CreateLogicalViewRequest, - dict, - ], -) -def test_create_logical_view(request_type, transport: str = "grpc"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_logical_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/spam") - response = client.create_logical_view(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.CreateLogicalViewRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -def test_create_logical_view_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_instance_admin.CreateLogicalViewRequest( - parent="parent_value", - logical_view_id="logical_view_id_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_logical_view), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.create_logical_view(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.CreateLogicalViewRequest( - parent="parent_value", - logical_view_id="logical_view_id_value", - ) - - -def test_create_logical_view_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.create_logical_view in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.create_logical_view - ] = mock_rpc - request = {} - client.create_logical_view(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods call wrapper_fn to build a cached - # client._transport.operations_client instance on first rpc call. 
- # Subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.create_logical_view(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_create_logical_view_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.create_logical_view - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.create_logical_view - ] = mock_rpc - - request = {} - await client.create_logical_view(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods call wrapper_fn to build a cached - # client._transport.operations_client instance on first rpc call. - # Subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - await client.create_logical_view(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_create_logical_view_async( - transport: str = "grpc_asyncio", - request_type=bigtable_instance_admin.CreateLogicalViewRequest, -): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_logical_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - response = await client.create_logical_view(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.CreateLogicalViewRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_logical_view_async_from_dict(): - await test_create_logical_view_async(request_type=dict) - - -def test_create_logical_view_field_headers(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.CreateLogicalViewRequest() - - request.parent = "parent_value" - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_logical_view), "__call__" - ) as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.create_logical_view(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_create_logical_view_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.CreateLogicalViewRequest() - - request.parent = "parent_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_logical_view), "__call__" - ) as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/op") - ) - await client.create_logical_view(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] - - -def test_create_logical_view_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_logical_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/op") - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_logical_view( - parent="parent_value", - logical_view=instance.LogicalView(name="name_value"), - logical_view_id="logical_view_id_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - arg = args[0].logical_view - mock_val = instance.LogicalView(name="name_value") - assert arg == mock_val - arg = args[0].logical_view_id - mock_val = "logical_view_id_value" - assert arg == mock_val - - -def test_create_logical_view_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_logical_view( - bigtable_instance_admin.CreateLogicalViewRequest(), - parent="parent_value", - logical_view=instance.LogicalView(name="name_value"), - logical_view_id="logical_view_id_value", - ) - - -@pytest.mark.asyncio -async def test_create_logical_view_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_logical_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/op") - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_logical_view( - parent="parent_value", - logical_view=instance.LogicalView(name="name_value"), - logical_view_id="logical_view_id_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - arg = args[0].logical_view - mock_val = instance.LogicalView(name="name_value") - assert arg == mock_val - arg = args[0].logical_view_id - mock_val = "logical_view_id_value" - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_logical_view_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_logical_view( - bigtable_instance_admin.CreateLogicalViewRequest(), - parent="parent_value", - logical_view=instance.LogicalView(name="name_value"), - logical_view_id="logical_view_id_value", - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.GetLogicalViewRequest, - dict, - ], -) -def test_get_logical_view(request_type, transport: str = "grpc"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_logical_view), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = instance.LogicalView( - name="name_value", - query="query_value", - etag="etag_value", - deletion_protection=True, - ) - response = client.get_logical_view(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.GetLogicalViewRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, instance.LogicalView) - assert response.name == "name_value" - assert response.query == "query_value" - assert response.etag == "etag_value" - assert response.deletion_protection is True - - -def test_get_logical_view_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. 
- request = bigtable_instance_admin.GetLogicalViewRequest( - name="name_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_logical_view), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.get_logical_view(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.GetLogicalViewRequest( - name="name_value", - ) - - -def test_get_logical_view_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.get_logical_view in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.get_logical_view - ] = mock_rpc - request = {} - client.get_logical_view(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.get_logical_view(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_get_logical_view_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.get_logical_view - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.get_logical_view - ] = mock_rpc - - request = {} - await client.get_logical_view(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - await client.get_logical_view(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_get_logical_view_async( - transport: str = "grpc_asyncio", - request_type=bigtable_instance_admin.GetLogicalViewRequest, -): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(type(client.transport.get_logical_view), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - instance.LogicalView( - name="name_value", - query="query_value", - etag="etag_value", - deletion_protection=True, - ) - ) - response = await client.get_logical_view(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.GetLogicalViewRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, instance.LogicalView) - assert response.name == "name_value" - assert response.query == "query_value" - assert response.etag == "etag_value" - assert response.deletion_protection is True - - -@pytest.mark.asyncio -async def test_get_logical_view_async_from_dict(): - await test_get_logical_view_async(request_type=dict) - - -def test_get_logical_view_field_headers(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.GetLogicalViewRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_logical_view), "__call__") as call: - call.return_value = instance.LogicalView() - client.get_logical_view(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_get_logical_view_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.GetLogicalViewRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_logical_view), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - instance.LogicalView() - ) - await client.get_logical_view(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -def test_get_logical_view_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_logical_view), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = instance.LogicalView() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- client.get_logical_view( - name="name_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - - -def test_get_logical_view_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_logical_view( - bigtable_instance_admin.GetLogicalViewRequest(), - name="name_value", - ) - - -@pytest.mark.asyncio -async def test_get_logical_view_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_logical_view), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = instance.LogicalView() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - instance.LogicalView() - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_logical_view( - name="name_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_logical_view_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_logical_view( - bigtable_instance_admin.GetLogicalViewRequest(), - name="name_value", - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.ListLogicalViewsRequest, - dict, - ], -) -def test_list_logical_views(request_type, transport: str = "grpc"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_logical_views), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_instance_admin.ListLogicalViewsResponse( - next_page_token="next_page_token_value", - ) - response = client.list_logical_views(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.ListLogicalViewsRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListLogicalViewsPager) - assert response.next_page_token == "next_page_token_value" - - -def test_list_logical_views_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. 
- client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_instance_admin.ListLogicalViewsRequest( - parent="parent_value", - page_token="page_token_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_logical_views), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.list_logical_views(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListLogicalViewsRequest( - parent="parent_value", - page_token="page_token_value", - ) - - -def test_list_logical_views_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.list_logical_views in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.list_logical_views - ] = mock_rpc - request = {} - client.list_logical_views(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.list_logical_views(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_list_logical_views_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.list_logical_views - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.list_logical_views - ] = mock_rpc - - request = {} - await client.list_logical_views(request) - - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 - - await client.list_logical_views(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_list_logical_views_async( - transport: str = "grpc_asyncio", - request_type=bigtable_instance_admin.ListLogicalViewsRequest, -): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_logical_views), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_instance_admin.ListLogicalViewsResponse( - next_page_token="next_page_token_value", - ) - ) - response = await client.list_logical_views(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.ListLogicalViewsRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListLogicalViewsAsyncPager) - assert response.next_page_token == "next_page_token_value" - - -@pytest.mark.asyncio -async def test_list_logical_views_async_from_dict(): - await test_list_logical_views_async(request_type=dict) - - -def test_list_logical_views_field_headers(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.ListLogicalViewsRequest() - - request.parent = "parent_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_logical_views), "__call__" - ) as call: - call.return_value = bigtable_instance_admin.ListLogicalViewsResponse() - client.list_logical_views(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_list_logical_views_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.ListLogicalViewsRequest() - - request.parent = "parent_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_logical_views), "__call__" - ) as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_instance_admin.ListLogicalViewsResponse() - ) - await client.list_logical_views(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] - - -def test_list_logical_views_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_logical_views), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_instance_admin.ListLogicalViewsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_logical_views( - parent="parent_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - - -def test_list_logical_views_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_logical_views( - bigtable_instance_admin.ListLogicalViewsRequest(), - parent="parent_value", - ) - - -@pytest.mark.asyncio -async def test_list_logical_views_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_logical_views), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_instance_admin.ListLogicalViewsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_instance_admin.ListLogicalViewsResponse() - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_logical_views( - parent="parent_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_logical_views_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_logical_views( - bigtable_instance_admin.ListLogicalViewsRequest(), - parent="parent_value", - ) - - -def test_list_logical_views_pager(transport_name: str = "grpc"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport_name, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_logical_views), "__call__" - ) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - bigtable_instance_admin.ListLogicalViewsResponse( - logical_views=[ - instance.LogicalView(), - instance.LogicalView(), - instance.LogicalView(), - ], - next_page_token="abc", - ), - bigtable_instance_admin.ListLogicalViewsResponse( - logical_views=[], - next_page_token="def", - ), - bigtable_instance_admin.ListLogicalViewsResponse( - logical_views=[ - instance.LogicalView(), - ], - next_page_token="ghi", - ), - bigtable_instance_admin.ListLogicalViewsResponse( - logical_views=[ - instance.LogicalView(), - instance.LogicalView(), - ], - ), - RuntimeError, - ) - - expected_metadata = () - retry = retries.Retry() - timeout = 5 - expected_metadata = tuple(expected_metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), - ) - pager = client.list_logical_views(request={}, retry=retry, timeout=timeout) - - assert pager._metadata == expected_metadata - assert pager._retry == retry - assert pager._timeout == timeout - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, instance.LogicalView) for i in results) - - -def test_list_logical_views_pages(transport_name: str = "grpc"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport_name, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_logical_views), "__call__" - ) as call: - # Set the response to a series of pages. - call.side_effect = ( - bigtable_instance_admin.ListLogicalViewsResponse( - logical_views=[ - instance.LogicalView(), - instance.LogicalView(), - instance.LogicalView(), - ], - next_page_token="abc", - ), - bigtable_instance_admin.ListLogicalViewsResponse( - logical_views=[], - next_page_token="def", - ), - bigtable_instance_admin.ListLogicalViewsResponse( - logical_views=[ - instance.LogicalView(), - ], - next_page_token="ghi", - ), - bigtable_instance_admin.ListLogicalViewsResponse( - logical_views=[ - instance.LogicalView(), - instance.LogicalView(), - ], - ), - RuntimeError, - ) - pages = list(client.list_logical_views(request={}).pages) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token - - -@pytest.mark.asyncio -async def test_list_logical_views_async_pager(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_logical_views), - "__call__", - new_callable=mock.AsyncMock, - ) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - bigtable_instance_admin.ListLogicalViewsResponse( - logical_views=[ - instance.LogicalView(), - instance.LogicalView(), - instance.LogicalView(), - ], - next_page_token="abc", - ), - bigtable_instance_admin.ListLogicalViewsResponse( - logical_views=[], - next_page_token="def", - ), - bigtable_instance_admin.ListLogicalViewsResponse( - logical_views=[ - instance.LogicalView(), - ], - next_page_token="ghi", - ), - bigtable_instance_admin.ListLogicalViewsResponse( - logical_views=[ - instance.LogicalView(), - instance.LogicalView(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_logical_views( - request={}, - ) - assert async_pager.next_page_token == "abc" - responses = [] - async for response in async_pager: # pragma: no branch - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, instance.LogicalView) for i in responses) - - -@pytest.mark.asyncio -async def test_list_logical_views_async_pages(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_logical_views), - "__call__", - new_callable=mock.AsyncMock, - ) as call: - # Set the response to a series of pages. - call.side_effect = ( - bigtable_instance_admin.ListLogicalViewsResponse( - logical_views=[ - instance.LogicalView(), - instance.LogicalView(), - instance.LogicalView(), - ], - next_page_token="abc", - ), - bigtable_instance_admin.ListLogicalViewsResponse( - logical_views=[], - next_page_token="def", - ), - bigtable_instance_admin.ListLogicalViewsResponse( - logical_views=[ - instance.LogicalView(), - ], - next_page_token="ghi", - ), - bigtable_instance_admin.ListLogicalViewsResponse( - logical_views=[ - instance.LogicalView(), - instance.LogicalView(), - ], - ), - RuntimeError, - ) - pages = [] - # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` - # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 - async for page_ in ( # pragma: no branch - await client.list_logical_views(request={}) - ).pages: - pages.append(page_) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.UpdateLogicalViewRequest, - dict, - ], -) -def test_update_logical_view(request_type, transport: str = "grpc"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_logical_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/spam") - response = client.update_logical_view(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.UpdateLogicalViewRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -def test_update_logical_view_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_instance_admin.UpdateLogicalViewRequest() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_logical_view), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.update_logical_view(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.UpdateLogicalViewRequest() - - -def test_update_logical_view_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.update_logical_view in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.update_logical_view - ] = mock_rpc - request = {} - client.update_logical_view(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods call wrapper_fn to build a cached - # client._transport.operations_client instance on first rpc call. 
- # Subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.update_logical_view(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_update_logical_view_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.update_logical_view - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.update_logical_view - ] = mock_rpc - - request = {} - await client.update_logical_view(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods call wrapper_fn to build a cached - # client._transport.operations_client instance on first rpc call. - # Subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - await client.update_logical_view(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_update_logical_view_async( - transport: str = "grpc_asyncio", - request_type=bigtable_instance_admin.UpdateLogicalViewRequest, -): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_logical_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - response = await client.update_logical_view(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.UpdateLogicalViewRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_update_logical_view_async_from_dict(): - await test_update_logical_view_async(request_type=dict) - - -def test_update_logical_view_field_headers(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.UpdateLogicalViewRequest() - - request.logical_view.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.update_logical_view), "__call__" - ) as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.update_logical_view(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "logical_view.name=name_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_update_logical_view_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.UpdateLogicalViewRequest() - - request.logical_view.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_logical_view), "__call__" - ) as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/op") - ) - await client.update_logical_view(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "logical_view.name=name_value", - ) in kw["metadata"] - - -def test_update_logical_view_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_logical_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/op") - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_logical_view( - logical_view=instance.LogicalView(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].logical_view - mock_val = instance.LogicalView(name="name_value") - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) - assert arg == mock_val - - -def test_update_logical_view_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_logical_view( - bigtable_instance_admin.UpdateLogicalViewRequest(), - logical_view=instance.LogicalView(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - -@pytest.mark.asyncio -async def test_update_logical_view_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.update_logical_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/op") - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_logical_view( - logical_view=instance.LogicalView(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].logical_view - mock_val = instance.LogicalView(name="name_value") - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_update_logical_view_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.update_logical_view( - bigtable_instance_admin.UpdateLogicalViewRequest(), - logical_view=instance.LogicalView(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.DeleteLogicalViewRequest, - dict, - ], -) -def test_delete_logical_view(request_type, transport: str = "grpc"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_logical_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.delete_logical_view(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.DeleteLogicalViewRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert response is None - - -def test_delete_logical_view_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_instance_admin.DeleteLogicalViewRequest( - name="name_value", - etag="etag_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_logical_view), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. 
- ) - client.delete_logical_view(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.DeleteLogicalViewRequest( - name="name_value", - etag="etag_value", - ) - - -def test_delete_logical_view_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.delete_logical_view in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.delete_logical_view - ] = mock_rpc - request = {} - client.delete_logical_view(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.delete_logical_view(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_delete_logical_view_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.delete_logical_view - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.delete_logical_view - ] = mock_rpc - - request = {} - await client.delete_logical_view(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - await client.delete_logical_view(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_delete_logical_view_async( - transport: str = "grpc_asyncio", - request_type=bigtable_instance_admin.DeleteLogicalViewRequest, -): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_logical_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.delete_logical_view(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.DeleteLogicalViewRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_delete_logical_view_async_from_dict(): - await test_delete_logical_view_async(request_type=dict) - - -def test_delete_logical_view_field_headers(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.DeleteLogicalViewRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_logical_view), "__call__" - ) as call: - call.return_value = None - client.delete_logical_view(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_delete_logical_view_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.DeleteLogicalViewRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_logical_view), "__call__" - ) as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_logical_view(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -def test_delete_logical_view_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_logical_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_logical_view( - name="name_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - - -def test_delete_logical_view_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.delete_logical_view( - bigtable_instance_admin.DeleteLogicalViewRequest(), - name="name_value", - ) - - -@pytest.mark.asyncio -async def test_delete_logical_view_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_logical_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = None - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_logical_view( - name="name_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_logical_view_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_logical_view( - bigtable_instance_admin.DeleteLogicalViewRequest(), - name="name_value", - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.CreateMaterializedViewRequest, - dict, - ], -) -def test_create_materialized_view(request_type, transport: str = "grpc"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_materialized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/spam") - response = client.create_materialized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.CreateMaterializedViewRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_create_materialized_view_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_instance_admin.CreateMaterializedViewRequest( - parent="parent_value", - materialized_view_id="materialized_view_id_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_materialized_view), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.create_materialized_view(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.CreateMaterializedViewRequest( - parent="parent_value", - materialized_view_id="materialized_view_id_value", - ) - - -def test_create_materialized_view_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.create_materialized_view - in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.create_materialized_view - ] = mock_rpc - request = {} - client.create_materialized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods call wrapper_fn to build a cached - # client._transport.operations_client instance on first rpc call. - # Subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.create_materialized_view(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_create_materialized_view_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.create_materialized_view - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.create_materialized_view - ] = mock_rpc - - request = {} - await client.create_materialized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods call wrapper_fn to build a cached - # client._transport.operations_client instance on first rpc call. 
- # Subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - await client.create_materialized_view(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_create_materialized_view_async( - transport: str = "grpc_asyncio", - request_type=bigtable_instance_admin.CreateMaterializedViewRequest, -): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_materialized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - response = await client.create_materialized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.CreateMaterializedViewRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_materialized_view_async_from_dict(): - await test_create_materialized_view_async(request_type=dict) - - -def test_create_materialized_view_field_headers(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.CreateMaterializedViewRequest() - - request.parent = "parent_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_materialized_view), "__call__" - ) as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.create_materialized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_create_materialized_view_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.CreateMaterializedViewRequest() - - request.parent = "parent_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_materialized_view), "__call__" - ) as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/op") - ) - await client.create_materialized_view(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] - - -def test_create_materialized_view_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_materialized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/op") - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_materialized_view( - parent="parent_value", - materialized_view=instance.MaterializedView(name="name_value"), - materialized_view_id="materialized_view_id_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - arg = args[0].materialized_view - mock_val = instance.MaterializedView(name="name_value") - assert arg == mock_val - arg = args[0].materialized_view_id - mock_val = "materialized_view_id_value" - assert arg == mock_val - - -def test_create_materialized_view_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_materialized_view( - bigtable_instance_admin.CreateMaterializedViewRequest(), - parent="parent_value", - materialized_view=instance.MaterializedView(name="name_value"), - materialized_view_id="materialized_view_id_value", - ) - - -@pytest.mark.asyncio -async def test_create_materialized_view_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_materialized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/op") - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_materialized_view( - parent="parent_value", - materialized_view=instance.MaterializedView(name="name_value"), - materialized_view_id="materialized_view_id_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - arg = args[0].materialized_view - mock_val = instance.MaterializedView(name="name_value") - assert arg == mock_val - arg = args[0].materialized_view_id - mock_val = "materialized_view_id_value" - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_materialized_view_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_materialized_view( - bigtable_instance_admin.CreateMaterializedViewRequest(), - parent="parent_value", - materialized_view=instance.MaterializedView(name="name_value"), - materialized_view_id="materialized_view_id_value", - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.GetMaterializedViewRequest, - dict, - ], -) -def test_get_materialized_view(request_type, transport: str = "grpc"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_materialized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = instance.MaterializedView( - name="name_value", - query="query_value", - etag="etag_value", - deletion_protection=True, - ) - response = client.get_materialized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.GetMaterializedViewRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, instance.MaterializedView) - assert response.name == "name_value" - assert response.query == "query_value" - assert response.etag == "etag_value" - assert response.deletion_protection is True - - -def test_get_materialized_view_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_instance_admin.GetMaterializedViewRequest( - name="name_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_materialized_view), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. 
- ) - client.get_materialized_view(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.GetMaterializedViewRequest( - name="name_value", - ) - - -def test_get_materialized_view_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.get_materialized_view - in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.get_materialized_view - ] = mock_rpc - request = {} - client.get_materialized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.get_materialized_view(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_get_materialized_view_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.get_materialized_view - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.get_materialized_view - ] = mock_rpc - - request = {} - await client.get_materialized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - await client.get_materialized_view(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_get_materialized_view_async( - transport: str = "grpc_asyncio", - request_type=bigtable_instance_admin.GetMaterializedViewRequest, -): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_materialized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - instance.MaterializedView( - name="name_value", - query="query_value", - etag="etag_value", - deletion_protection=True, - ) - ) - response = await client.get_materialized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.GetMaterializedViewRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, instance.MaterializedView) - assert response.name == "name_value" - assert response.query == "query_value" - assert response.etag == "etag_value" - assert response.deletion_protection is True - - -@pytest.mark.asyncio -async def test_get_materialized_view_async_from_dict(): - await test_get_materialized_view_async(request_type=dict) - - -def test_get_materialized_view_field_headers(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.GetMaterializedViewRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_materialized_view), "__call__" - ) as call: - call.return_value = instance.MaterializedView() - client.get_materialized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_get_materialized_view_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.GetMaterializedViewRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_materialized_view), "__call__" - ) as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - instance.MaterializedView() - ) - await client.get_materialized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -def test_get_materialized_view_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_materialized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = instance.MaterializedView() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- client.get_materialized_view( - name="name_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - - -def test_get_materialized_view_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_materialized_view( - bigtable_instance_admin.GetMaterializedViewRequest(), - name="name_value", - ) - - -@pytest.mark.asyncio -async def test_get_materialized_view_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_materialized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = instance.MaterializedView() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - instance.MaterializedView() - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_materialized_view( - name="name_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_materialized_view_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_materialized_view( - bigtable_instance_admin.GetMaterializedViewRequest(), - name="name_value", - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.ListMaterializedViewsRequest, - dict, - ], -) -def test_list_materialized_views(request_type, transport: str = "grpc"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_materialized_views), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_instance_admin.ListMaterializedViewsResponse( - next_page_token="next_page_token_value", - ) - response = client.list_materialized_views(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.ListMaterializedViewsRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListMaterializedViewsPager) - assert response.next_page_token == "next_page_token_value" - - -def test_list_materialized_views_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_instance_admin.ListMaterializedViewsRequest( - parent="parent_value", - page_token="page_token_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_materialized_views), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.list_materialized_views(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListMaterializedViewsRequest( - parent="parent_value", - page_token="page_token_value", - ) - - -def test_list_materialized_views_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.list_materialized_views - in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.list_materialized_views - ] = mock_rpc - request = {} - client.list_materialized_views(request) - - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 - - client.list_materialized_views(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_list_materialized_views_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.list_materialized_views - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.list_materialized_views - ] = mock_rpc - - request = {} - await client.list_materialized_views(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - await client.list_materialized_views(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_list_materialized_views_async( - transport: str = "grpc_asyncio", - request_type=bigtable_instance_admin.ListMaterializedViewsRequest, -): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_materialized_views), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_instance_admin.ListMaterializedViewsResponse( - next_page_token="next_page_token_value", - ) - ) - response = await client.list_materialized_views(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.ListMaterializedViewsRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListMaterializedViewsAsyncPager) - assert response.next_page_token == "next_page_token_value" - - -@pytest.mark.asyncio -async def test_list_materialized_views_async_from_dict(): - await test_list_materialized_views_async(request_type=dict) - - -def test_list_materialized_views_field_headers(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.ListMaterializedViewsRequest() - - request.parent = "parent_value" - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_materialized_views), "__call__" - ) as call: - call.return_value = bigtable_instance_admin.ListMaterializedViewsResponse() - client.list_materialized_views(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_list_materialized_views_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.ListMaterializedViewsRequest() - - request.parent = "parent_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_materialized_views), "__call__" - ) as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_instance_admin.ListMaterializedViewsResponse() - ) - await client.list_materialized_views(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] - - -def test_list_materialized_views_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_materialized_views), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_instance_admin.ListMaterializedViewsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_materialized_views( - parent="parent_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - - -def test_list_materialized_views_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_materialized_views( - bigtable_instance_admin.ListMaterializedViewsRequest(), - parent="parent_value", - ) - - -@pytest.mark.asyncio -async def test_list_materialized_views_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_materialized_views), "__call__" - ) as call: - # Designate an appropriate return value for the call. 
- call.return_value = bigtable_instance_admin.ListMaterializedViewsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_instance_admin.ListMaterializedViewsResponse() - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_materialized_views( - parent="parent_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_materialized_views_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_materialized_views( - bigtable_instance_admin.ListMaterializedViewsRequest(), - parent="parent_value", - ) - - -def test_list_materialized_views_pager(transport_name: str = "grpc"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport_name, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_materialized_views), "__call__" - ) as call: - # Set the response to a series of pages. - call.side_effect = ( - bigtable_instance_admin.ListMaterializedViewsResponse( - materialized_views=[ - instance.MaterializedView(), - instance.MaterializedView(), - instance.MaterializedView(), - ], - next_page_token="abc", - ), - bigtable_instance_admin.ListMaterializedViewsResponse( - materialized_views=[], - next_page_token="def", - ), - bigtable_instance_admin.ListMaterializedViewsResponse( - materialized_views=[ - instance.MaterializedView(), - ], - next_page_token="ghi", - ), - bigtable_instance_admin.ListMaterializedViewsResponse( - materialized_views=[ - instance.MaterializedView(), - instance.MaterializedView(), - ], - ), - RuntimeError, - ) - - expected_metadata = () - retry = retries.Retry() - timeout = 5 - expected_metadata = tuple(expected_metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), - ) - pager = client.list_materialized_views(request={}, retry=retry, timeout=timeout) - - assert pager._metadata == expected_metadata - assert pager._retry == retry - assert pager._timeout == timeout - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, instance.MaterializedView) for i in results) - - -def test_list_materialized_views_pages(transport_name: str = "grpc"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport_name, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_materialized_views), "__call__" - ) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - bigtable_instance_admin.ListMaterializedViewsResponse( - materialized_views=[ - instance.MaterializedView(), - instance.MaterializedView(), - instance.MaterializedView(), - ], - next_page_token="abc", - ), - bigtable_instance_admin.ListMaterializedViewsResponse( - materialized_views=[], - next_page_token="def", - ), - bigtable_instance_admin.ListMaterializedViewsResponse( - materialized_views=[ - instance.MaterializedView(), - ], - next_page_token="ghi", - ), - bigtable_instance_admin.ListMaterializedViewsResponse( - materialized_views=[ - instance.MaterializedView(), - instance.MaterializedView(), - ], - ), - RuntimeError, - ) - pages = list(client.list_materialized_views(request={}).pages) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token - - -@pytest.mark.asyncio -async def test_list_materialized_views_async_pager(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_materialized_views), - "__call__", - new_callable=mock.AsyncMock, - ) as call: - # Set the response to a series of pages. - call.side_effect = ( - bigtable_instance_admin.ListMaterializedViewsResponse( - materialized_views=[ - instance.MaterializedView(), - instance.MaterializedView(), - instance.MaterializedView(), - ], - next_page_token="abc", - ), - bigtable_instance_admin.ListMaterializedViewsResponse( - materialized_views=[], - next_page_token="def", - ), - bigtable_instance_admin.ListMaterializedViewsResponse( - materialized_views=[ - instance.MaterializedView(), - ], - next_page_token="ghi", - ), - bigtable_instance_admin.ListMaterializedViewsResponse( - materialized_views=[ - instance.MaterializedView(), - instance.MaterializedView(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_materialized_views( - request={}, - ) - assert async_pager.next_page_token == "abc" - responses = [] - async for response in async_pager: # pragma: no branch - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, instance.MaterializedView) for i in responses) - - -@pytest.mark.asyncio -async def test_list_materialized_views_async_pages(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_materialized_views), - "__call__", - new_callable=mock.AsyncMock, - ) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - bigtable_instance_admin.ListMaterializedViewsResponse( - materialized_views=[ - instance.MaterializedView(), - instance.MaterializedView(), - instance.MaterializedView(), - ], - next_page_token="abc", - ), - bigtable_instance_admin.ListMaterializedViewsResponse( - materialized_views=[], - next_page_token="def", - ), - bigtable_instance_admin.ListMaterializedViewsResponse( - materialized_views=[ - instance.MaterializedView(), - ], - next_page_token="ghi", - ), - bigtable_instance_admin.ListMaterializedViewsResponse( - materialized_views=[ - instance.MaterializedView(), - instance.MaterializedView(), - ], - ), - RuntimeError, - ) - pages = [] - # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` - # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 - async for page_ in ( # pragma: no branch - await client.list_materialized_views(request={}) - ).pages: - pages.append(page_) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.UpdateMaterializedViewRequest, - dict, - ], -) -def test_update_materialized_view(request_type, transport: str = "grpc"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_materialized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/spam") - response = client.update_materialized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.UpdateMaterializedViewRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_update_materialized_view_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_instance_admin.UpdateMaterializedViewRequest() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_materialized_view), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. 
- ) - client.update_materialized_view(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.UpdateMaterializedViewRequest() - - -def test_update_materialized_view_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.update_materialized_view - in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.update_materialized_view - ] = mock_rpc - request = {} - client.update_materialized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods call wrapper_fn to build a cached - # client._transport.operations_client instance on first rpc call. - # Subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.update_materialized_view(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_update_materialized_view_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.update_materialized_view - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.update_materialized_view - ] = mock_rpc - - request = {} - await client.update_materialized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods call wrapper_fn to build a cached - # client._transport.operations_client instance on first rpc call. 
- # Subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - await client.update_materialized_view(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_update_materialized_view_async( - transport: str = "grpc_asyncio", - request_type=bigtable_instance_admin.UpdateMaterializedViewRequest, -): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_materialized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - response = await client.update_materialized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.UpdateMaterializedViewRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_update_materialized_view_async_from_dict(): - await test_update_materialized_view_async(request_type=dict) - - -def test_update_materialized_view_field_headers(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.UpdateMaterializedViewRequest() - - request.materialized_view.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_materialized_view), "__call__" - ) as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.update_materialized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "materialized_view.name=name_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_update_materialized_view_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.UpdateMaterializedViewRequest() - - request.materialized_view.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_materialized_view), "__call__" - ) as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/op") - ) - await client.update_materialized_view(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "materialized_view.name=name_value", - ) in kw["metadata"] - - -def test_update_materialized_view_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_materialized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/op") - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_materialized_view( - materialized_view=instance.MaterializedView(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].materialized_view - mock_val = instance.MaterializedView(name="name_value") - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) - assert arg == mock_val - - -def test_update_materialized_view_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_materialized_view( - bigtable_instance_admin.UpdateMaterializedViewRequest(), - materialized_view=instance.MaterializedView(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - -@pytest.mark.asyncio -async def test_update_materialized_view_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_materialized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/op") - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_materialized_view( - materialized_view=instance.MaterializedView(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].materialized_view - mock_val = instance.MaterializedView(name="name_value") - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_update_materialized_view_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.update_materialized_view( - bigtable_instance_admin.UpdateMaterializedViewRequest(), - materialized_view=instance.MaterializedView(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.DeleteMaterializedViewRequest, - dict, - ], -) -def test_delete_materialized_view(request_type, transport: str = "grpc"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_materialized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.delete_materialized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.DeleteMaterializedViewRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert response is None - - -def test_delete_materialized_view_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_instance_admin.DeleteMaterializedViewRequest( - name="name_value", - etag="etag_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_materialized_view), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.delete_materialized_view(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.DeleteMaterializedViewRequest( - name="name_value", - etag="etag_value", - ) - - -def test_delete_materialized_view_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.delete_materialized_view - in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. 
- ) - client._transport._wrapped_methods[ - client._transport.delete_materialized_view - ] = mock_rpc - request = {} - client.delete_materialized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.delete_materialized_view(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_delete_materialized_view_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.delete_materialized_view - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.delete_materialized_view - ] = mock_rpc - - request = {} - await client.delete_materialized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - await client.delete_materialized_view(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_delete_materialized_view_async( - transport: str = "grpc_asyncio", - request_type=bigtable_instance_admin.DeleteMaterializedViewRequest, -): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_materialized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.delete_materialized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_instance_admin.DeleteMaterializedViewRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_delete_materialized_view_async_from_dict(): - await test_delete_materialized_view_async(request_type=dict) - - -def test_delete_materialized_view_field_headers(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.DeleteMaterializedViewRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_materialized_view), "__call__" - ) as call: - call.return_value = None - client.delete_materialized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_delete_materialized_view_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.DeleteMaterializedViewRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_materialized_view), "__call__" - ) as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_materialized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -def test_delete_materialized_view_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_materialized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_materialized_view( - name="name_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - - -def test_delete_materialized_view_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_materialized_view( - bigtable_instance_admin.DeleteMaterializedViewRequest(), - name="name_value", - ) - - -@pytest.mark.asyncio -async def test_delete_materialized_view_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_materialized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = None - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_materialized_view( - name="name_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_materialized_view_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_materialized_view( - bigtable_instance_admin.DeleteMaterializedViewRequest(), - name="name_value", - ) - - -def test_create_instance_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.create_instance in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.create_instance] = mock_rpc - - request = {} - client.create_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.create_instance(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_create_instance_rest_required_fields( - request_type=bigtable_instance_admin.CreateInstanceRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request_init["instance_id"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_instance._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = "parent_value" - jsonified_request["instanceId"] = "instance_id_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_instance._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - assert "instanceId" in jsonified_request - assert jsonified_request["instanceId"] == "instance_id_value" - - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. 
- return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.create_instance(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_create_instance_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.create_instance._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "parent", - "instanceId", - "instance", - "clusters", - ) - ) - ) - - -def test_create_instance_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1"} - - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - instance_id="instance_id_value", - instance=gba_instance.Instance(name="name_value"), - clusters={"key_value": gba_instance.Cluster(name="name_value")}, - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.create_instance(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*}/instances" % client.transport._host, args[1] - ) - - -def test_create_instance_rest_flattened_error(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.create_instance( - bigtable_instance_admin.CreateInstanceRequest(), - parent="parent_value", - instance_id="instance_id_value", - instance=gba_instance.Instance(name="name_value"), - clusters={"key_value": gba_instance.Cluster(name="name_value")}, - ) - - -def test_get_instance_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.get_instance in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.get_instance] = mock_rpc - - request = {} - client.get_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.get_instance(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_get_instance_rest_required_fields( - request_type=bigtable_instance_admin.GetInstanceRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_instance._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = "name_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_instance._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" - - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = instance.Instance() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = instance.Instance.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.get_instance(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_get_instance_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.get_instance._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) - - -def test_get_instance_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = instance.Instance() - - # get arguments that satisfy an http rule for this method - sample_request = {"name": "projects/sample1/instances/sample2"} - - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = instance.Instance.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.get_instance(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*}" % client.transport._host, args[1] - ) - - -def test_get_instance_rest_flattened_error(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.get_instance( - bigtable_instance_admin.GetInstanceRequest(), - name="name_value", - ) - - -def test_list_instances_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.list_instances in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.list_instances] = mock_rpc - - request = {} - client.list_instances(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.list_instances(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_list_instances_rest_required_fields( - request_type=bigtable_instance_admin.ListInstancesRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_instances._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = "parent_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_instances._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("page_token",)) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListInstancesResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_instance_admin.ListInstancesResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.list_instances(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_list_instances_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.list_instances._get_unset_required_fields({}) - assert set(unset_fields) == (set(("pageToken",)) & set(("parent",))) - - -def test_list_instances_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListInstancesResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1"} - - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_instance_admin.ListInstancesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.list_instances(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*}/instances" % client.transport._host, args[1] - ) - - -def test_list_instances_rest_flattened_error(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.list_instances( - bigtable_instance_admin.ListInstancesRequest(), - parent="parent_value", - ) - - -def test_update_instance_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.update_instance in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.update_instance] = mock_rpc - - request = {} - client.update_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.update_instance(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_update_instance_rest_required_fields(request_type=instance.Instance): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["display_name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).update_instance._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["displayName"] = "display_name_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).update_instance._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "displayName" in jsonified_request - assert jsonified_request["displayName"] == "display_name_value" - - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = instance.Instance() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "put", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = instance.Instance.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.update_instance(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_update_instance_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.update_instance._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("displayName",))) - - -def test_partial_update_instance_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.partial_update_instance - in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.partial_update_instance - ] = mock_rpc - - request = {} - client.partial_update_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.partial_update_instance(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_partial_update_instance_rest_required_fields( - request_type=bigtable_instance_admin.PartialUpdateInstanceRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).partial_update_instance._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).partial_update_instance._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. 
- assert not set(unset_fields) - set(("update_mask",)) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "patch", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.partial_update_instance(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_partial_update_instance_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.partial_update_instance._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(("updateMask",)) - & set( - ( - "instance", - "updateMask", - ) - ) - ) - - -def test_partial_update_instance_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # get arguments that satisfy an http rule for this method - sample_request = {"instance": {"name": "projects/sample1/instances/sample2"}} - - # get truthy value for each flattened field - mock_args = dict( - instance=gba_instance.Instance(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.partial_update_instance(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{instance.name=projects/*/instances/*}" % client.transport._host, - args[1], - ) - - -def test_partial_update_instance_rest_flattened_error(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.partial_update_instance( - bigtable_instance_admin.PartialUpdateInstanceRequest(), - instance=gba_instance.Instance(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - -def test_delete_instance_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.delete_instance in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.delete_instance] = mock_rpc - - request = {} - client.delete_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.delete_instance(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_delete_instance_rest_required_fields( - request_type=bigtable_instance_admin.DeleteInstanceRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).delete_instance._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = "name_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).delete_instance._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" - - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = None - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "delete", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = "" - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.delete_instance(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_delete_instance_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.delete_instance._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) - - -def test_delete_instance_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = None - - # get arguments that satisfy an http rule for this method - sample_request = {"name": "projects/sample1/instances/sample2"} - - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = "" - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.delete_instance(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*}" % client.transport._host, args[1] - ) - - -def test_delete_instance_rest_flattened_error(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.delete_instance( - bigtable_instance_admin.DeleteInstanceRequest(), - name="name_value", - ) - - -def test_create_cluster_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.create_cluster in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.create_cluster] = mock_rpc - - request = {} - client.create_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.create_cluster(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_create_cluster_rest_required_fields( - request_type=bigtable_instance_admin.CreateClusterRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request_init["cluster_id"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - assert "clusterId" not in jsonified_request - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_cluster._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - assert "clusterId" in jsonified_request - assert jsonified_request["clusterId"] == request_init["cluster_id"] - - jsonified_request["parent"] = "parent_value" - jsonified_request["clusterId"] = "cluster_id_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_cluster._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("cluster_id",)) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - assert "clusterId" in jsonified_request - assert jsonified_request["clusterId"] == "cluster_id_value" - - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.create_cluster(request) - - expected_params = [ - ( - "clusterId", - "", - ), - ("$alt", "json;enum-encoding=int"), - ] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_create_cluster_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.create_cluster._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(("clusterId",)) - & set( - ( - "parent", - "clusterId", - "cluster", - ) - ) - ) - - -def test_create_cluster_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/instances/sample2"} - - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - cluster_id="cluster_id_value", - cluster=instance.Cluster(name="name_value"), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.create_cluster(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*}/clusters" % client.transport._host, - args[1], - ) - - -def test_create_cluster_rest_flattened_error(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.create_cluster( - bigtable_instance_admin.CreateClusterRequest(), - parent="parent_value", - cluster_id="cluster_id_value", - cluster=instance.Cluster(name="name_value"), - ) - - -def test_get_cluster_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.get_cluster in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.get_cluster] = mock_rpc - - request = {} - client.get_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.get_cluster(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_get_cluster_rest_required_fields( - request_type=bigtable_instance_admin.GetClusterRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_cluster._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = "name_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_cluster._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" - - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = instance.Cluster() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = instance.Cluster.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.get_cluster(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_get_cluster_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.get_cluster._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) - - -def test_get_cluster_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = instance.Cluster() - - # get arguments that satisfy an http rule for this method - sample_request = {"name": "projects/sample1/instances/sample2/clusters/sample3"} - - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = instance.Cluster.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.get_cluster(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/clusters/*}" % client.transport._host, - args[1], - ) - - -def test_get_cluster_rest_flattened_error(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.get_cluster( - bigtable_instance_admin.GetClusterRequest(), - name="name_value", - ) - - -def test_list_clusters_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.list_clusters in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.list_clusters] = mock_rpc - - request = {} - client.list_clusters(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.list_clusters(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_list_clusters_rest_required_fields( - request_type=bigtable_instance_admin.ListClustersRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_clusters._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = "parent_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_clusters._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("page_token",)) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListClustersResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_instance_admin.ListClustersResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.list_clusters(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_list_clusters_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.list_clusters._get_unset_required_fields({}) - assert set(unset_fields) == (set(("pageToken",)) & set(("parent",))) - - -def test_list_clusters_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListClustersResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/instances/sample2"} - - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_instance_admin.ListClustersResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.list_clusters(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*}/clusters" % client.transport._host, - args[1], - ) - - -def test_list_clusters_rest_flattened_error(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.list_clusters( - bigtable_instance_admin.ListClustersRequest(), - parent="parent_value", - ) - - -def test_update_cluster_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.update_cluster in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.update_cluster] = mock_rpc - - request = {} - client.update_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.update_cluster(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_partial_update_cluster_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.partial_update_cluster - in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.partial_update_cluster - ] = mock_rpc - - request = {} - client.partial_update_cluster(request) - - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 - - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.partial_update_cluster(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_partial_update_cluster_rest_required_fields( - request_type=bigtable_instance_admin.PartialUpdateClusterRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).partial_update_cluster._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).partial_update_cluster._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("update_mask",)) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "patch", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.partial_update_cluster(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_partial_update_cluster_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.partial_update_cluster._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(("updateMask",)) - & set( - ( - "cluster", - "updateMask", - ) - ) - ) - - -def test_partial_update_cluster_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # get arguments that satisfy an http rule for this method - sample_request = { - "cluster": {"name": "projects/sample1/instances/sample2/clusters/sample3"} - } - - # get truthy value for each flattened field - mock_args = dict( - cluster=instance.Cluster(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.partial_update_cluster(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{cluster.name=projects/*/instances/*/clusters/*}" - % client.transport._host, - args[1], - ) - - -def test_partial_update_cluster_rest_flattened_error(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.partial_update_cluster( - bigtable_instance_admin.PartialUpdateClusterRequest(), - cluster=instance.Cluster(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - -def test_delete_cluster_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.delete_cluster in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.delete_cluster] = mock_rpc - - request = {} - client.delete_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.delete_cluster(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_delete_cluster_rest_required_fields( - request_type=bigtable_instance_admin.DeleteClusterRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).delete_cluster._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = "name_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).delete_cluster._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" - - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = None - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "delete", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = "" - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.delete_cluster(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_delete_cluster_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.delete_cluster._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) - - -def test_delete_cluster_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = None - - # get arguments that satisfy an http rule for this method - sample_request = {"name": "projects/sample1/instances/sample2/clusters/sample3"} - - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = "" - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.delete_cluster(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/clusters/*}" % client.transport._host, - args[1], - ) - - -def test_delete_cluster_rest_flattened_error(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_cluster( - bigtable_instance_admin.DeleteClusterRequest(), - name="name_value", - ) - - -def test_create_app_profile_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.create_app_profile in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. 
- ) - client._transport._wrapped_methods[ - client._transport.create_app_profile - ] = mock_rpc - - request = {} - client.create_app_profile(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.create_app_profile(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_create_app_profile_rest_required_fields( - request_type=bigtable_instance_admin.CreateAppProfileRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request_init["app_profile_id"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - assert "appProfileId" not in jsonified_request - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_app_profile._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - assert "appProfileId" in jsonified_request - assert jsonified_request["appProfileId"] == request_init["app_profile_id"] - - jsonified_request["parent"] = "parent_value" - jsonified_request["appProfileId"] = "app_profile_id_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_app_profile._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set( - ( - "app_profile_id", - "ignore_warnings", - ) - ) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - assert "appProfileId" in jsonified_request - assert jsonified_request["appProfileId"] == "app_profile_id_value" - - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = instance.AppProfile() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = instance.AppProfile.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.create_app_profile(request) - - expected_params = [ - ( - "appProfileId", - "", - ), - ("$alt", "json;enum-encoding=int"), - ] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_create_app_profile_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.create_app_profile._get_unset_required_fields({}) - assert set(unset_fields) == ( - set( - ( - "appProfileId", - "ignoreWarnings", - ) - ) - & set( - ( - "parent", - "appProfileId", - "appProfile", - ) - ) - ) - - -def test_create_app_profile_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = instance.AppProfile() - - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/instances/sample2"} - - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - app_profile_id="app_profile_id_value", - app_profile=instance.AppProfile(name="name_value"), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = instance.AppProfile.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.create_app_profile(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*}/appProfiles" - % client.transport._host, - args[1], - ) - - -def test_create_app_profile_rest_flattened_error(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.create_app_profile( - bigtable_instance_admin.CreateAppProfileRequest(), - parent="parent_value", - app_profile_id="app_profile_id_value", - app_profile=instance.AppProfile(name="name_value"), - ) - - -def test_get_app_profile_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.get_app_profile in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.get_app_profile] = mock_rpc - - request = {} - client.get_app_profile(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.get_app_profile(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_get_app_profile_rest_required_fields( - request_type=bigtable_instance_admin.GetAppProfileRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_app_profile._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = "name_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_app_profile._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" - - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = instance.AppProfile() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = instance.AppProfile.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.get_app_profile(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_get_app_profile_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.get_app_profile._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) - - -def test_get_app_profile_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = instance.AppProfile() - - # get arguments that satisfy an http rule for this method - sample_request = { - "name": "projects/sample1/instances/sample2/appProfiles/sample3" - } - - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = instance.AppProfile.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.get_app_profile(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/appProfiles/*}" - % client.transport._host, - args[1], - ) - - -def test_get_app_profile_rest_flattened_error(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.get_app_profile( - bigtable_instance_admin.GetAppProfileRequest(), - name="name_value", - ) - - -def test_list_app_profiles_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.list_app_profiles in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.list_app_profiles - ] = mock_rpc - - request = {} - client.list_app_profiles(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.list_app_profiles(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_list_app_profiles_rest_required_fields( - request_type=bigtable_instance_admin.ListAppProfilesRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_app_profiles._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = "parent_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_app_profiles._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set( - ( - "page_size", - "page_token", - ) - ) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListAppProfilesResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_instance_admin.ListAppProfilesResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.list_app_profiles(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_list_app_profiles_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.list_app_profiles._get_unset_required_fields({}) - assert set(unset_fields) == ( - set( - ( - "pageSize", - "pageToken", - ) - ) - & set(("parent",)) - ) - - -def test_list_app_profiles_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListAppProfilesResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/instances/sample2"} - - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_instance_admin.ListAppProfilesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.list_app_profiles(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*}/appProfiles" - % client.transport._host, - args[1], - ) - - -def test_list_app_profiles_rest_flattened_error(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_app_profiles( - bigtable_instance_admin.ListAppProfilesRequest(), - parent="parent_value", - ) - - -def test_list_app_profiles_rest_pager(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. 
- # with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[ - instance.AppProfile(), - instance.AppProfile(), - instance.AppProfile(), - ], - next_page_token="abc", - ), - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[], - next_page_token="def", - ), - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[ - instance.AppProfile(), - ], - next_page_token="ghi", - ), - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[ - instance.AppProfile(), - instance.AppProfile(), - ], - ), - ) - # Two responses for two calls - response = response + response - - # Wrap the values into proper Response objs - response = tuple( - bigtable_instance_admin.ListAppProfilesResponse.to_json(x) for x in response - ) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode("UTF-8") - return_val.status_code = 200 - req.side_effect = return_values - - sample_request = {"parent": "projects/sample1/instances/sample2"} - - pager = client.list_app_profiles(request=sample_request) - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, instance.AppProfile) for i in results) - - pages = list(client.list_app_profiles(request=sample_request).pages) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token - - -def test_update_app_profile_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.update_app_profile in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.update_app_profile - ] = mock_rpc - - request = {} - client.update_app_profile(request) - - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 - - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.update_app_profile(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_update_app_profile_rest_required_fields( - request_type=bigtable_instance_admin.UpdateAppProfileRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).update_app_profile._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).update_app_profile._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set( - ( - "ignore_warnings", - "update_mask", - ) - ) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "patch", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.update_app_profile(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_update_app_profile_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.update_app_profile._get_unset_required_fields({}) - assert set(unset_fields) == ( - set( - ( - "ignoreWarnings", - "updateMask", - ) - ) - & set( - ( - "appProfile", - "updateMask", - ) - ) - ) - - -def test_update_app_profile_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # get arguments that satisfy an http rule for this method - sample_request = { - "app_profile": { - "name": "projects/sample1/instances/sample2/appProfiles/sample3" - } - } - - # get truthy value for each flattened field - mock_args = dict( - app_profile=instance.AppProfile(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.update_app_profile(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}" - % client.transport._host, - args[1], - ) - - -def test_update_app_profile_rest_flattened_error(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.update_app_profile( - bigtable_instance_admin.UpdateAppProfileRequest(), - app_profile=instance.AppProfile(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - -def test_delete_app_profile_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.delete_app_profile in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.delete_app_profile - ] = mock_rpc - - request = {} - client.delete_app_profile(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.delete_app_profile(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_delete_app_profile_rest_required_fields( - request_type=bigtable_instance_admin.DeleteAppProfileRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["name"] = "" - request_init["ignore_warnings"] = False - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - assert "ignoreWarnings" not in jsonified_request - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).delete_app_profile._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - assert "ignoreWarnings" in jsonified_request - assert jsonified_request["ignoreWarnings"] == request_init["ignore_warnings"] - - jsonified_request["name"] = "name_value" - jsonified_request["ignoreWarnings"] = True - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).delete_app_profile._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("ignore_warnings",)) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" - assert "ignoreWarnings" in jsonified_request - assert jsonified_request["ignoreWarnings"] == True - - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = None - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "delete", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = "" - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.delete_app_profile(request) - - expected_params = [ - ( - "ignoreWarnings", - str(False).lower(), - ), - ("$alt", "json;enum-encoding=int"), - ] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_delete_app_profile_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.delete_app_profile._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(("ignoreWarnings",)) - & set( - ( - "name", - "ignoreWarnings", - ) - ) - ) - - -def test_delete_app_profile_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = None - - # get arguments that satisfy an http rule for this method - sample_request = { - "name": "projects/sample1/instances/sample2/appProfiles/sample3" - } - - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - ignore_warnings=True, - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = "" - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.delete_app_profile(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/appProfiles/*}" - % client.transport._host, - args[1], - ) - - -def test_delete_app_profile_rest_flattened_error(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.delete_app_profile( - bigtable_instance_admin.DeleteAppProfileRequest(), - name="name_value", - ignore_warnings=True, - ) - - -def test_get_iam_policy_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.get_iam_policy in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.get_iam_policy] = mock_rpc - - request = {} - client.get_iam_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.get_iam_policy(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_get_iam_policy_rest_required_fields( - request_type=iam_policy_pb2.GetIamPolicyRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["resource"] = "" - request = request_type(**request_init) - pb_request = request - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_iam_policy._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["resource"] = "resource_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_iam_policy._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "resource" in jsonified_request - assert jsonified_request["resource"] == "resource_value" - - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.get_iam_policy(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_get_iam_policy_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.get_iam_policy._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("resource",))) - - -def test_get_iam_policy_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy() - - # get arguments that satisfy an http rule for this method - sample_request = {"resource": "projects/sample1/instances/sample2"} - - # get truthy value for each flattened field - mock_args = dict( - resource="resource_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.get_iam_policy(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{resource=projects/*/instances/*}:getIamPolicy" - % client.transport._host, - args[1], - ) - - -def test_get_iam_policy_rest_flattened_error(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.get_iam_policy( - iam_policy_pb2.GetIamPolicyRequest(), - resource="resource_value", - ) - - -def test_set_iam_policy_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.set_iam_policy in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.set_iam_policy] = mock_rpc - - request = {} - client.set_iam_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.set_iam_policy(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_set_iam_policy_rest_required_fields( - request_type=iam_policy_pb2.SetIamPolicyRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["resource"] = "" - request = request_type(**request_init) - pb_request = request - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).set_iam_policy._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["resource"] = "resource_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).set_iam_policy._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "resource" in jsonified_request - assert jsonified_request["resource"] == "resource_value" - - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.set_iam_policy(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_set_iam_policy_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.set_iam_policy._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "resource", - "policy", - ) - ) - ) - - -def test_set_iam_policy_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy() - - # get arguments that satisfy an http rule for this method - sample_request = {"resource": "projects/sample1/instances/sample2"} - - # get truthy value for each flattened field - mock_args = dict( - resource="resource_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.set_iam_policy(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{resource=projects/*/instances/*}:setIamPolicy" - % client.transport._host, - args[1], - ) - - -def test_set_iam_policy_rest_flattened_error(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.set_iam_policy( - iam_policy_pb2.SetIamPolicyRequest(), - resource="resource_value", - ) - - -def test_test_iam_permissions_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.test_iam_permissions in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.test_iam_permissions - ] = mock_rpc - - request = {} - client.test_iam_permissions(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.test_iam_permissions(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_test_iam_permissions_rest_required_fields( - request_type=iam_policy_pb2.TestIamPermissionsRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["resource"] = "" - request_init["permissions"] = "" - request = request_type(**request_init) - pb_request = request - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).test_iam_permissions._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["resource"] = "resource_value" - jsonified_request["permissions"] = "permissions_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).test_iam_permissions._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "resource" in jsonified_request - assert jsonified_request["resource"] == "resource_value" - assert "permissions" in jsonified_request - assert jsonified_request["permissions"] == "permissions_value" - - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = iam_policy_pb2.TestIamPermissionsResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.test_iam_permissions(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_test_iam_permissions_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.test_iam_permissions._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "resource", - "permissions", - ) - ) - ) - - -def test_test_iam_permissions_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = iam_policy_pb2.TestIamPermissionsResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {"resource": "projects/sample1/instances/sample2"} - - # get truthy value for each flattened field - mock_args = dict( - resource="resource_value", - permissions=["permissions_value"], - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.test_iam_permissions(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{resource=projects/*/instances/*}:testIamPermissions" - % client.transport._host, - args[1], - ) - - -def test_test_iam_permissions_rest_flattened_error(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.test_iam_permissions( - iam_policy_pb2.TestIamPermissionsRequest(), - resource="resource_value", - permissions=["permissions_value"], - ) - - -def test_list_hot_tablets_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.list_hot_tablets in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.list_hot_tablets - ] = mock_rpc - - request = {} - client.list_hot_tablets(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.list_hot_tablets(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_list_hot_tablets_rest_required_fields( - request_type=bigtable_instance_admin.ListHotTabletsRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_hot_tablets._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = "parent_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_hot_tablets._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set( - ( - "end_time", - "page_size", - "page_token", - "start_time", - ) - ) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListHotTabletsResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_instance_admin.ListHotTabletsResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.list_hot_tablets(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_list_hot_tablets_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.list_hot_tablets._get_unset_required_fields({}) - assert set(unset_fields) == ( - set( - ( - "endTime", - "pageSize", - "pageToken", - "startTime", - ) - ) - & set(("parent",)) - ) - - -def test_list_hot_tablets_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListHotTabletsResponse() - - # get arguments that satisfy an http rule for this method - sample_request = { - "parent": "projects/sample1/instances/sample2/clusters/sample3" - } - - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_instance_admin.ListHotTabletsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.list_hot_tablets(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*/clusters/*}/hotTablets" - % client.transport._host, - args[1], - ) - - -def test_list_hot_tablets_rest_flattened_error(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_hot_tablets( - bigtable_instance_admin.ListHotTabletsRequest(), - parent="parent_value", - ) - - -def test_list_hot_tablets_rest_pager(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(Session, "request") as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. - # with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[ - instance.HotTablet(), - instance.HotTablet(), - instance.HotTablet(), - ], - next_page_token="abc", - ), - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[], - next_page_token="def", - ), - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[ - instance.HotTablet(), - ], - next_page_token="ghi", - ), - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[ - instance.HotTablet(), - instance.HotTablet(), - ], - ), - ) - # Two responses for two calls - response = response + response - - # Wrap the values into proper Response objs - response = tuple( - bigtable_instance_admin.ListHotTabletsResponse.to_json(x) for x in response - ) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode("UTF-8") - return_val.status_code = 200 - req.side_effect = return_values - - sample_request = { - "parent": "projects/sample1/instances/sample2/clusters/sample3" - } - - pager = client.list_hot_tablets(request=sample_request) - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, instance.HotTablet) for i in results) - - pages = list(client.list_hot_tablets(request=sample_request).pages) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token - - -def test_create_logical_view_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.create_logical_view in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.create_logical_view - ] = mock_rpc - - request = {} - client.create_logical_view(request) - - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 - - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.create_logical_view(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_create_logical_view_rest_required_fields( - request_type=bigtable_instance_admin.CreateLogicalViewRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request_init["logical_view_id"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - assert "logicalViewId" not in jsonified_request - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_logical_view._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - assert "logicalViewId" in jsonified_request - assert jsonified_request["logicalViewId"] == request_init["logical_view_id"] - - jsonified_request["parent"] = "parent_value" - jsonified_request["logicalViewId"] = "logical_view_id_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_logical_view._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("logical_view_id",)) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - assert "logicalViewId" in jsonified_request - assert jsonified_request["logicalViewId"] == "logical_view_id_value" - - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.create_logical_view(request) - - expected_params = [ - ( - "logicalViewId", - "", - ), - ("$alt", "json;enum-encoding=int"), - ] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_create_logical_view_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.create_logical_view._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(("logicalViewId",)) - & set( - ( - "parent", - "logicalViewId", - "logicalView", - ) - ) - ) - - -def test_create_logical_view_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/instances/sample2"} - - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - logical_view=instance.LogicalView(name="name_value"), - logical_view_id="logical_view_id_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.create_logical_view(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*}/logicalViews" - % client.transport._host, - args[1], - ) - - -def test_create_logical_view_rest_flattened_error(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.create_logical_view( - bigtable_instance_admin.CreateLogicalViewRequest(), - parent="parent_value", - logical_view=instance.LogicalView(name="name_value"), - logical_view_id="logical_view_id_value", - ) - - -def test_get_logical_view_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.get_logical_view in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.get_logical_view - ] = mock_rpc - - request = {} - client.get_logical_view(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.get_logical_view(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_get_logical_view_rest_required_fields( - request_type=bigtable_instance_admin.GetLogicalViewRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_logical_view._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = "name_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_logical_view._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" - - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = instance.LogicalView() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = instance.LogicalView.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.get_logical_view(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_get_logical_view_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.get_logical_view._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) - - -def test_get_logical_view_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = instance.LogicalView() - - # get arguments that satisfy an http rule for this method - sample_request = { - "name": "projects/sample1/instances/sample2/logicalViews/sample3" - } - - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = instance.LogicalView.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.get_logical_view(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/logicalViews/*}" - % client.transport._host, - args[1], - ) - - -def test_get_logical_view_rest_flattened_error(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.get_logical_view( - bigtable_instance_admin.GetLogicalViewRequest(), - name="name_value", - ) - - -def test_list_logical_views_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.list_logical_views in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.list_logical_views - ] = mock_rpc - - request = {} - client.list_logical_views(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.list_logical_views(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_list_logical_views_rest_required_fields( - request_type=bigtable_instance_admin.ListLogicalViewsRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_logical_views._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = "parent_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_logical_views._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set( - ( - "page_size", - "page_token", - ) - ) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListLogicalViewsResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_instance_admin.ListLogicalViewsResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.list_logical_views(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_list_logical_views_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.list_logical_views._get_unset_required_fields({}) - assert set(unset_fields) == ( - set( - ( - "pageSize", - "pageToken", - ) - ) - & set(("parent",)) - ) - - -def test_list_logical_views_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListLogicalViewsResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/instances/sample2"} - - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_instance_admin.ListLogicalViewsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.list_logical_views(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*}/logicalViews" - % client.transport._host, - args[1], - ) - - -def test_list_logical_views_rest_flattened_error(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_logical_views( - bigtable_instance_admin.ListLogicalViewsRequest(), - parent="parent_value", - ) - - -def test_list_logical_views_rest_pager(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. 
- # with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - bigtable_instance_admin.ListLogicalViewsResponse( - logical_views=[ - instance.LogicalView(), - instance.LogicalView(), - instance.LogicalView(), - ], - next_page_token="abc", - ), - bigtable_instance_admin.ListLogicalViewsResponse( - logical_views=[], - next_page_token="def", - ), - bigtable_instance_admin.ListLogicalViewsResponse( - logical_views=[ - instance.LogicalView(), - ], - next_page_token="ghi", - ), - bigtable_instance_admin.ListLogicalViewsResponse( - logical_views=[ - instance.LogicalView(), - instance.LogicalView(), - ], - ), - ) - # Two responses for two calls - response = response + response - - # Wrap the values into proper Response objs - response = tuple( - bigtable_instance_admin.ListLogicalViewsResponse.to_json(x) - for x in response - ) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode("UTF-8") - return_val.status_code = 200 - req.side_effect = return_values - - sample_request = {"parent": "projects/sample1/instances/sample2"} - - pager = client.list_logical_views(request=sample_request) - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, instance.LogicalView) for i in results) - - pages = list(client.list_logical_views(request=sample_request).pages) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token - - -def test_update_logical_view_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.update_logical_view in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.update_logical_view - ] = mock_rpc - - request = {} - client.update_logical_view(request) - - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 - - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.update_logical_view(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_update_logical_view_rest_required_fields( - request_type=bigtable_instance_admin.UpdateLogicalViewRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).update_logical_view._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).update_logical_view._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("update_mask",)) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "patch", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.update_logical_view(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_update_logical_view_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.update_logical_view._get_unset_required_fields({}) - assert set(unset_fields) == (set(("updateMask",)) & set(("logicalView",))) - - -def test_update_logical_view_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # get arguments that satisfy an http rule for this method - sample_request = { - "logical_view": { - "name": "projects/sample1/instances/sample2/logicalViews/sample3" - } - } - - # get truthy value for each flattened field - mock_args = dict( - logical_view=instance.LogicalView(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.update_logical_view(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{logical_view.name=projects/*/instances/*/logicalViews/*}" - % client.transport._host, - args[1], - ) - - -def test_update_logical_view_rest_flattened_error(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_logical_view( - bigtable_instance_admin.UpdateLogicalViewRequest(), - logical_view=instance.LogicalView(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - -def test_delete_logical_view_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.delete_logical_view in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.delete_logical_view - ] = mock_rpc - - request = {} - client.delete_logical_view(request) - - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 - - client.delete_logical_view(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_delete_logical_view_rest_required_fields( - request_type=bigtable_instance_admin.DeleteLogicalViewRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).delete_logical_view._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = "name_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).delete_logical_view._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("etag",)) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" - - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = None - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "delete", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = "" - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.delete_logical_view(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_delete_logical_view_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.delete_logical_view._get_unset_required_fields({}) - assert set(unset_fields) == (set(("etag",)) & set(("name",))) - - -def test_delete_logical_view_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = None - - # get arguments that satisfy an http rule for this method - sample_request = { - "name": "projects/sample1/instances/sample2/logicalViews/sample3" - } - - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = "" - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.delete_logical_view(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/logicalViews/*}" - % client.transport._host, - args[1], - ) - - -def test_delete_logical_view_rest_flattened_error(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_logical_view( - bigtable_instance_admin.DeleteLogicalViewRequest(), - name="name_value", - ) - - -def test_create_materialized_view_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.create_materialized_view - in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.create_materialized_view - ] = mock_rpc - - request = {} - client.create_materialized_view(request) - - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 - - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.create_materialized_view(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_create_materialized_view_rest_required_fields( - request_type=bigtable_instance_admin.CreateMaterializedViewRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request_init["materialized_view_id"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - assert "materializedViewId" not in jsonified_request - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_materialized_view._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - assert "materializedViewId" in jsonified_request - assert ( - jsonified_request["materializedViewId"] == request_init["materialized_view_id"] - ) - - jsonified_request["parent"] = "parent_value" - jsonified_request["materializedViewId"] = "materialized_view_id_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_materialized_view._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("materialized_view_id",)) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - assert "materializedViewId" in jsonified_request - assert jsonified_request["materializedViewId"] == "materialized_view_id_value" - - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.create_materialized_view(request) - - expected_params = [ - ( - "materializedViewId", - "", - ), - ("$alt", "json;enum-encoding=int"), - ] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_create_materialized_view_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.create_materialized_view._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(("materializedViewId",)) - & set( - ( - "parent", - "materializedViewId", - "materializedView", - ) - ) - ) - - -def test_create_materialized_view_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/instances/sample2"} - - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - materialized_view=instance.MaterializedView(name="name_value"), - materialized_view_id="materialized_view_id_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.create_materialized_view(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*}/materializedViews" - % client.transport._host, - args[1], - ) - - -def test_create_materialized_view_rest_flattened_error(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.create_materialized_view( - bigtable_instance_admin.CreateMaterializedViewRequest(), - parent="parent_value", - materialized_view=instance.MaterializedView(name="name_value"), - materialized_view_id="materialized_view_id_value", - ) - - -def test_get_materialized_view_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.get_materialized_view - in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.get_materialized_view - ] = mock_rpc - - request = {} - client.get_materialized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.get_materialized_view(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_get_materialized_view_rest_required_fields( - request_type=bigtable_instance_admin.GetMaterializedViewRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_materialized_view._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = "name_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_materialized_view._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" - - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = instance.MaterializedView() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = instance.MaterializedView.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.get_materialized_view(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_get_materialized_view_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.get_materialized_view._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) - - -def test_get_materialized_view_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = instance.MaterializedView() - - # get arguments that satisfy an http rule for this method - sample_request = { - "name": "projects/sample1/instances/sample2/materializedViews/sample3" - } - - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = instance.MaterializedView.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.get_materialized_view(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/materializedViews/*}" - % client.transport._host, - args[1], - ) - - -def test_get_materialized_view_rest_flattened_error(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.get_materialized_view( - bigtable_instance_admin.GetMaterializedViewRequest(), - name="name_value", - ) - - -def test_list_materialized_views_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.list_materialized_views - in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.list_materialized_views - ] = mock_rpc - - request = {} - client.list_materialized_views(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.list_materialized_views(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_list_materialized_views_rest_required_fields( - request_type=bigtable_instance_admin.ListMaterializedViewsRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_materialized_views._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = "parent_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_materialized_views._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set( - ( - "page_size", - "page_token", - ) - ) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListMaterializedViewsResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_instance_admin.ListMaterializedViewsResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.list_materialized_views(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_list_materialized_views_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.list_materialized_views._get_unset_required_fields({}) - assert set(unset_fields) == ( - set( - ( - "pageSize", - "pageToken", - ) - ) - & set(("parent",)) - ) - - -def test_list_materialized_views_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListMaterializedViewsResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/instances/sample2"} - - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_instance_admin.ListMaterializedViewsResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.list_materialized_views(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*}/materializedViews" - % client.transport._host, - args[1], - ) - - -def test_list_materialized_views_rest_flattened_error(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_materialized_views( - bigtable_instance_admin.ListMaterializedViewsRequest(), - parent="parent_value", - ) - - -def test_list_materialized_views_rest_pager(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(Session, "request") as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. - # with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - bigtable_instance_admin.ListMaterializedViewsResponse( - materialized_views=[ - instance.MaterializedView(), - instance.MaterializedView(), - instance.MaterializedView(), - ], - next_page_token="abc", - ), - bigtable_instance_admin.ListMaterializedViewsResponse( - materialized_views=[], - next_page_token="def", - ), - bigtable_instance_admin.ListMaterializedViewsResponse( - materialized_views=[ - instance.MaterializedView(), - ], - next_page_token="ghi", - ), - bigtable_instance_admin.ListMaterializedViewsResponse( - materialized_views=[ - instance.MaterializedView(), - instance.MaterializedView(), - ], - ), - ) - # Two responses for two calls - response = response + response - - # Wrap the values into proper Response objs - response = tuple( - bigtable_instance_admin.ListMaterializedViewsResponse.to_json(x) - for x in response - ) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode("UTF-8") - return_val.status_code = 200 - req.side_effect = return_values - - sample_request = {"parent": "projects/sample1/instances/sample2"} - - pager = client.list_materialized_views(request=sample_request) - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, instance.MaterializedView) for i in results) - - pages = list(client.list_materialized_views(request=sample_request).pages) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token - - -def test_update_materialized_view_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.update_materialized_view - in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.update_materialized_view - ] = mock_rpc - - request = {} - client.update_materialized_view(request) - - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 - - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.update_materialized_view(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_update_materialized_view_rest_required_fields( - request_type=bigtable_instance_admin.UpdateMaterializedViewRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).update_materialized_view._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).update_materialized_view._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("update_mask",)) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "patch", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.update_materialized_view(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_update_materialized_view_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.update_materialized_view._get_unset_required_fields({}) - assert set(unset_fields) == (set(("updateMask",)) & set(("materializedView",))) - - -def test_update_materialized_view_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # get arguments that satisfy an http rule for this method - sample_request = { - "materialized_view": { - "name": "projects/sample1/instances/sample2/materializedViews/sample3" - } - } - - # get truthy value for each flattened field - mock_args = dict( - materialized_view=instance.MaterializedView(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.update_materialized_view(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{materialized_view.name=projects/*/instances/*/materializedViews/*}" - % client.transport._host, - args[1], - ) - - -def test_update_materialized_view_rest_flattened_error(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.update_materialized_view( - bigtable_instance_admin.UpdateMaterializedViewRequest(), - materialized_view=instance.MaterializedView(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - -def test_delete_materialized_view_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.delete_materialized_view - in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.delete_materialized_view - ] = mock_rpc - - request = {} - client.delete_materialized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.delete_materialized_view(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_delete_materialized_view_rest_required_fields( - request_type=bigtable_instance_admin.DeleteMaterializedViewRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).delete_materialized_view._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = "name_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).delete_materialized_view._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("etag",)) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" - - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = None - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "delete", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = "" - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.delete_materialized_view(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_delete_materialized_view_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.delete_materialized_view._get_unset_required_fields({}) - assert set(unset_fields) == (set(("etag",)) & set(("name",))) - - -def test_delete_materialized_view_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = None - - # get arguments that satisfy an http rule for this method - sample_request = { - "name": "projects/sample1/instances/sample2/materializedViews/sample3" - } - - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = "" - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.delete_materialized_view(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/materializedViews/*}" - % client.transport._host, - args[1], - ) - - -def test_delete_materialized_view_rest_flattened_error(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_materialized_view( - bigtable_instance_admin.DeleteMaterializedViewRequest(), - name="name_value", - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.BigtableInstanceAdminGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. 
- transport = transports.BigtableInstanceAdminGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = BigtableInstanceAdminClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide an api_key and a transport instance. - transport = transports.BigtableInstanceAdminGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - options = client_options.ClientOptions() - options.api_key = "api_key" - with pytest.raises(ValueError): - client = BigtableInstanceAdminClient( - client_options=options, - transport=transport, - ) - - # It is an error to provide an api_key and a credential. - options = client_options.ClientOptions() - options.api_key = "api_key" - with pytest.raises(ValueError): - client = BigtableInstanceAdminClient( - client_options=options, credentials=ga_credentials.AnonymousCredentials() - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.BigtableInstanceAdminGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = BigtableInstanceAdminClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.BigtableInstanceAdminGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = BigtableInstanceAdminClient(transport=transport) - assert client.transport is transport - - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.BigtableInstanceAdminGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.BigtableInstanceAdminGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.BigtableInstanceAdminGrpcTransport, - transports.BigtableInstanceAdminGrpcAsyncIOTransport, - transports.BigtableInstanceAdminRestTransport, - ], -) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, "default") as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - - -def test_transport_kind_grpc(): - transport = BigtableInstanceAdminClient.get_transport_class("grpc")( - credentials=ga_credentials.AnonymousCredentials() - ) - assert transport.kind == "grpc" - - -def test_initialize_client_w_grpc(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc" - ) - assert client is not None - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_create_instance_empty_call_grpc(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.create_instance), "__call__") as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.create_instance(request=None) - - # Establish that the underlying stub method was called. 
- call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.CreateInstanceRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_get_instance_empty_call_grpc(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.get_instance), "__call__") as call: - call.return_value = instance.Instance() - client.get_instance(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.GetInstanceRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_list_instances_empty_call_grpc(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.list_instances), "__call__") as call: - call.return_value = bigtable_instance_admin.ListInstancesResponse() - client.list_instances(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.ListInstancesRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_update_instance_empty_call_grpc(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.update_instance), "__call__") as call: - call.return_value = instance.Instance() - client.update_instance(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = instance.Instance() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_partial_update_instance_empty_call_grpc(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.partial_update_instance), "__call__" - ) as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.partial_update_instance(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.PartialUpdateInstanceRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_delete_instance_empty_call_grpc(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. 
- with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: - call.return_value = None - client.delete_instance(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.DeleteInstanceRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_create_cluster_empty_call_grpc(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.create_cluster(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.CreateClusterRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_get_cluster_empty_call_grpc(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: - call.return_value = instance.Cluster() - client.get_cluster(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.GetClusterRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_list_clusters_empty_call_grpc(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: - call.return_value = bigtable_instance_admin.ListClustersResponse() - client.list_clusters(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.ListClustersRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_update_cluster_empty_call_grpc(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.update_cluster), "__call__") as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.update_cluster(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = instance.Cluster() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. 
-def test_partial_update_cluster_empty_call_grpc(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.partial_update_cluster), "__call__" - ) as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.partial_update_cluster(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.PartialUpdateClusterRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_delete_cluster_empty_call_grpc(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: - call.return_value = None - client.delete_cluster(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.DeleteClusterRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_create_app_profile_empty_call_grpc(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.create_app_profile), "__call__" - ) as call: - call.return_value = instance.AppProfile() - client.create_app_profile(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.CreateAppProfileRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_get_app_profile_empty_call_grpc(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call: - call.return_value = instance.AppProfile() - client.get_app_profile(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.GetAppProfileRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_list_app_profiles_empty_call_grpc(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.list_app_profiles), "__call__" - ) as call: - call.return_value = bigtable_instance_admin.ListAppProfilesResponse() - client.list_app_profiles(request=None) - - # Establish that the underlying stub method was called. 
- call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.ListAppProfilesRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_update_app_profile_empty_call_grpc(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.update_app_profile), "__call__" - ) as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.update_app_profile(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.UpdateAppProfileRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_delete_app_profile_empty_call_grpc(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.delete_app_profile), "__call__" - ) as call: - call.return_value = None - client.delete_app_profile(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.DeleteAppProfileRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_get_iam_policy_empty_call_grpc(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: - call.return_value = policy_pb2.Policy() - client.get_iam_policy(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = iam_policy_pb2.GetIamPolicyRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_set_iam_policy_empty_call_grpc(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: - call.return_value = policy_pb2.Policy() - client.set_iam_policy(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = iam_policy_pb2.SetIamPolicyRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_test_iam_permissions_empty_call_grpc(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. 
- with mock.patch.object( - type(client.transport.test_iam_permissions), "__call__" - ) as call: - call.return_value = iam_policy_pb2.TestIamPermissionsResponse() - client.test_iam_permissions(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = iam_policy_pb2.TestIamPermissionsRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_list_hot_tablets_empty_call_grpc(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.list_hot_tablets), "__call__") as call: - call.return_value = bigtable_instance_admin.ListHotTabletsResponse() - client.list_hot_tablets(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.ListHotTabletsRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_create_logical_view_empty_call_grpc(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.create_logical_view), "__call__" - ) as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.create_logical_view(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.CreateLogicalViewRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_get_logical_view_empty_call_grpc(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.get_logical_view), "__call__") as call: - call.return_value = instance.LogicalView() - client.get_logical_view(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.GetLogicalViewRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_list_logical_views_empty_call_grpc(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.list_logical_views), "__call__" - ) as call: - call.return_value = bigtable_instance_admin.ListLogicalViewsResponse() - client.list_logical_views(request=None) - - # Establish that the underlying stub method was called. 
- call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.ListLogicalViewsRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_update_logical_view_empty_call_grpc(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.update_logical_view), "__call__" - ) as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.update_logical_view(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.UpdateLogicalViewRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_delete_logical_view_empty_call_grpc(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.delete_logical_view), "__call__" - ) as call: - call.return_value = None - client.delete_logical_view(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.DeleteLogicalViewRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_create_materialized_view_empty_call_grpc(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.create_materialized_view), "__call__" - ) as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.create_materialized_view(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.CreateMaterializedViewRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_get_materialized_view_empty_call_grpc(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.get_materialized_view), "__call__" - ) as call: - call.return_value = instance.MaterializedView() - client.get_materialized_view(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.GetMaterializedViewRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. 
-def test_list_materialized_views_empty_call_grpc(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.list_materialized_views), "__call__" - ) as call: - call.return_value = bigtable_instance_admin.ListMaterializedViewsResponse() - client.list_materialized_views(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.ListMaterializedViewsRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_update_materialized_view_empty_call_grpc(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.update_materialized_view), "__call__" - ) as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.update_materialized_view(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.UpdateMaterializedViewRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_delete_materialized_view_empty_call_grpc(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.delete_materialized_view), "__call__" - ) as call: - call.return_value = None - client.delete_materialized_view(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.DeleteMaterializedViewRequest() - - assert args[0] == request_msg - - -def test_transport_kind_grpc_asyncio(): - transport = BigtableInstanceAdminAsyncClient.get_transport_class("grpc_asyncio")( - credentials=async_anonymous_credentials() - ) - assert transport.kind == "grpc_asyncio" - - -def test_initialize_client_w_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), transport="grpc_asyncio" - ) - assert client is not None - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_create_instance_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.create_instance), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - await client.create_instance(request=None) - - # Establish that the underlying stub method was called. 
- call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.CreateInstanceRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_get_instance_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.get_instance), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - instance.Instance( - name="name_value", - display_name="display_name_value", - state=instance.Instance.State.READY, - type_=instance.Instance.Type.PRODUCTION, - satisfies_pzs=True, - satisfies_pzi=True, - ) - ) - await client.get_instance(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.GetInstanceRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_list_instances_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.list_instances), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_instance_admin.ListInstancesResponse( - failed_locations=["failed_locations_value"], - next_page_token="next_page_token_value", - ) - ) - await client.list_instances(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.ListInstancesRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_update_instance_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.update_instance), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - instance.Instance( - name="name_value", - display_name="display_name_value", - state=instance.Instance.State.READY, - type_=instance.Instance.Type.PRODUCTION, - satisfies_pzs=True, - satisfies_pzi=True, - ) - ) - await client.update_instance(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = instance.Instance() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. 
-@pytest.mark.asyncio -async def test_partial_update_instance_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.partial_update_instance), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - await client.partial_update_instance(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.PartialUpdateInstanceRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_delete_instance_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_instance(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.DeleteInstanceRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_create_cluster_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - await client.create_cluster(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.CreateClusterRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_get_cluster_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - instance.Cluster( - name="name_value", - location="location_value", - state=instance.Cluster.State.READY, - serve_nodes=1181, - node_scaling_factor=instance.Cluster.NodeScalingFactor.NODE_SCALING_FACTOR_1X, - default_storage_type=common.StorageType.SSD, - ) - ) - await client.get_cluster(request=None) - - # Establish that the underlying stub method was called. 
- call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.GetClusterRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_list_clusters_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_instance_admin.ListClustersResponse( - failed_locations=["failed_locations_value"], - next_page_token="next_page_token_value", - ) - ) - await client.list_clusters(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.ListClustersRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_update_cluster_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.update_cluster), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - await client.update_cluster(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = instance.Cluster() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_partial_update_cluster_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.partial_update_cluster), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - await client.partial_update_cluster(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.PartialUpdateClusterRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_delete_cluster_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_cluster(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.DeleteClusterRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_create_app_profile_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.create_app_profile), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - instance.AppProfile( - name="name_value", - etag="etag_value", - description="description_value", - ) - ) - await client.create_app_profile(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.CreateAppProfileRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_get_app_profile_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - instance.AppProfile( - name="name_value", - etag="etag_value", - description="description_value", - ) - ) - await client.get_app_profile(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.GetAppProfileRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_list_app_profiles_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.list_app_profiles), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_instance_admin.ListAppProfilesResponse( - next_page_token="next_page_token_value", - failed_locations=["failed_locations_value"], - ) - ) - await client.list_app_profiles(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.ListAppProfilesRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. 
-@pytest.mark.asyncio -async def test_update_app_profile_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.update_app_profile), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - await client.update_app_profile(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.UpdateAppProfileRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_delete_app_profile_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.delete_app_profile), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_app_profile(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.DeleteAppProfileRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_get_iam_policy_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - policy_pb2.Policy( - version=774, - etag=b"etag_blob", - ) - ) - await client.get_iam_policy(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = iam_policy_pb2.GetIamPolicyRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_set_iam_policy_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - policy_pb2.Policy( - version=774, - etag=b"etag_blob", - ) - ) - await client.set_iam_policy(request=None) - - # Establish that the underlying stub method was called. 
- call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = iam_policy_pb2.SetIamPolicyRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_test_iam_permissions_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - iam_policy_pb2.TestIamPermissionsResponse( - permissions=["permissions_value"], - ) - ) - await client.test_iam_permissions(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = iam_policy_pb2.TestIamPermissionsRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_list_hot_tablets_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.list_hot_tablets), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_instance_admin.ListHotTabletsResponse( - next_page_token="next_page_token_value", - ) - ) - await client.list_hot_tablets(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.ListHotTabletsRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_create_logical_view_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.create_logical_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - await client.create_logical_view(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.CreateLogicalViewRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_get_logical_view_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.get_logical_view), "__call__") as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - instance.LogicalView( - name="name_value", - query="query_value", - etag="etag_value", - deletion_protection=True, - ) - ) - await client.get_logical_view(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.GetLogicalViewRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_list_logical_views_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.list_logical_views), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_instance_admin.ListLogicalViewsResponse( - next_page_token="next_page_token_value", - ) - ) - await client.list_logical_views(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.ListLogicalViewsRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_update_logical_view_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.update_logical_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - await client.update_logical_view(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.UpdateLogicalViewRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_delete_logical_view_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.delete_logical_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_logical_view(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.DeleteLogicalViewRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. 
-@pytest.mark.asyncio -async def test_create_materialized_view_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.create_materialized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - await client.create_materialized_view(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.CreateMaterializedViewRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_get_materialized_view_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.get_materialized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - instance.MaterializedView( - name="name_value", - query="query_value", - etag="etag_value", - deletion_protection=True, - ) - ) - await client.get_materialized_view(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.GetMaterializedViewRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_list_materialized_views_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.list_materialized_views), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_instance_admin.ListMaterializedViewsResponse( - next_page_token="next_page_token_value", - ) - ) - await client.list_materialized_views(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.ListMaterializedViewsRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_update_materialized_view_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.update_materialized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - await client.update_materialized_view(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.UpdateMaterializedViewRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_delete_materialized_view_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.delete_materialized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_materialized_view(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.DeleteMaterializedViewRequest() - - assert args[0] == request_msg - - -def test_transport_kind_rest(): - transport = BigtableInstanceAdminClient.get_transport_class("rest")( - credentials=ga_credentials.AnonymousCredentials() - ) - assert transport.kind == "rest" - - -def test_create_instance_rest_bad_request( - request_type=bigtable_instance_admin.CreateInstanceRequest, -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.create_instance(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.CreateInstanceRequest, - dict, - ], -) -def test_create_instance_rest_call_success(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. 
- return_value = operations_pb2.Operation(name="operations/spam") - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.create_instance(request) - - # Establish that the response is the type that we expect. - json_return_value = json_format.MessageToJson(return_value) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_create_instance_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_create_instance" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, - "post_create_instance_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_create_instance" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_instance_admin.CreateInstanceRequest.pb( - bigtable_instance_admin.CreateInstanceRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = json_format.MessageToJson(operations_pb2.Operation()) - req.return_value.content = return_value - - request = bigtable_instance_admin.CreateInstanceRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - post_with_metadata.return_value = operations_pb2.Operation(), metadata - - client.create_instance( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test_get_instance_rest_bad_request( - request_type=bigtable_instance_admin.GetInstanceRequest, -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.get_instance(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.GetInstanceRequest, - dict, - ], -) -def test_get_instance_rest_call_success(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = instance.Instance( - name="name_value", - display_name="display_name_value", - state=instance.Instance.State.READY, - type_=instance.Instance.Type.PRODUCTION, - satisfies_pzs=True, - satisfies_pzi=True, - ) - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = instance.Instance.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.get_instance(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, instance.Instance) - assert response.name == "name_value" - assert response.display_name == "display_name_value" - assert response.state == instance.Instance.State.READY - assert response.type_ == instance.Instance.Type.PRODUCTION - assert response.satisfies_pzs is True - assert response.satisfies_pzi is True - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_instance_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_get_instance" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, - "post_get_instance_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_get_instance" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_instance_admin.GetInstanceRequest.pb( - bigtable_instance_admin.GetInstanceRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = instance.Instance.to_json(instance.Instance()) - req.return_value.content = return_value - - request = bigtable_instance_admin.GetInstanceRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = instance.Instance() - post_with_metadata.return_value = instance.Instance(), metadata - - client.get_instance( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test_list_instances_rest_bad_request( - request_type=bigtable_instance_admin.ListInstancesRequest, -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.list_instances(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.ListInstancesRequest, - dict, - ], -) -def test_list_instances_rest_call_success(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListInstancesResponse( - failed_locations=["failed_locations_value"], - next_page_token="next_page_token_value", - ) - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_instance_admin.ListInstancesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.list_instances(request) - - assert response.raw_page is response - - # Establish that the response is the type that we expect. 
- assert isinstance(response, bigtable_instance_admin.ListInstancesResponse) - assert response.failed_locations == ["failed_locations_value"] - assert response.next_page_token == "next_page_token_value" - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_instances_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_list_instances" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, - "post_list_instances_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_list_instances" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_instance_admin.ListInstancesRequest.pb( - bigtable_instance_admin.ListInstancesRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = bigtable_instance_admin.ListInstancesResponse.to_json( - bigtable_instance_admin.ListInstancesResponse() - ) - req.return_value.content = return_value - - request = bigtable_instance_admin.ListInstancesRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable_instance_admin.ListInstancesResponse() - post_with_metadata.return_value = ( - bigtable_instance_admin.ListInstancesResponse(), - metadata, - ) - - client.list_instances( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test_update_instance_rest_bad_request(request_type=instance.Instance): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.update_instance(request) - - -@pytest.mark.parametrize( - "request_type", - [ - instance.Instance, - dict, - ], -) -def test_update_instance_rest_call_success(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = instance.Instance( - name="name_value", - display_name="display_name_value", - state=instance.Instance.State.READY, - type_=instance.Instance.Type.PRODUCTION, - satisfies_pzs=True, - satisfies_pzi=True, - ) - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = instance.Instance.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.update_instance(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, instance.Instance) - assert response.name == "name_value" - assert response.display_name == "display_name_value" - assert response.state == instance.Instance.State.READY - assert response.type_ == instance.Instance.Type.PRODUCTION - assert response.satisfies_pzs is True - assert response.satisfies_pzi is True - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_update_instance_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_update_instance" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, - "post_update_instance_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_update_instance" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = instance.Instance.pb(instance.Instance()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = instance.Instance.to_json(instance.Instance()) - req.return_value.content = return_value - - request = instance.Instance() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = instance.Instance() - post_with_metadata.return_value = instance.Instance(), metadata - - client.update_instance( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test_partial_update_instance_rest_bad_request( - request_type=bigtable_instance_admin.PartialUpdateInstanceRequest, -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"instance": {"name": "projects/sample1/instances/sample2"}} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.partial_update_instance(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.PartialUpdateInstanceRequest, - dict, - ], -) -def test_partial_update_instance_rest_call_success(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"instance": {"name": "projects/sample1/instances/sample2"}} - request_init["instance"] = { - "name": "projects/sample1/instances/sample2", - "display_name": "display_name_value", - "state": 1, - "type_": 1, - "labels": {}, - "create_time": {"seconds": 751, "nanos": 543}, - "satisfies_pzs": True, - "satisfies_pzi": True, - } - # The version of a generated dependency at test runtime may differ from the version used during generation. - # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 - - # Determine if the message type is proto-plus or protobuf - test_field = bigtable_instance_admin.PartialUpdateInstanceRequest.meta.fields[ - "instance" - ] - - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. 
- message_fields = [] - - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") - - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields - - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - - subfields_not_in_runtime = [] - - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["instance"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - { - "field": field, - "subfield": subfield, - "is_repeated": is_repeated, - } - ) - - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range(0, len(request_init["instance"][field])): - del request_init["instance"][field][i][subfield] - else: - del request_init["instance"][field][subfield] - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.partial_update_instance(request) - - # Establish that the response is the type that we expect. 
- json_return_value = json_format.MessageToJson(return_value) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_partial_update_instance_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_partial_update_instance" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, - "post_partial_update_instance_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_partial_update_instance" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_instance_admin.PartialUpdateInstanceRequest.pb( - bigtable_instance_admin.PartialUpdateInstanceRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = json_format.MessageToJson(operations_pb2.Operation()) - req.return_value.content = return_value - - request = bigtable_instance_admin.PartialUpdateInstanceRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - post_with_metadata.return_value = operations_pb2.Operation(), metadata - - client.partial_update_instance( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test_delete_instance_rest_bad_request( - request_type=bigtable_instance_admin.DeleteInstanceRequest, -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.delete_instance(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.DeleteInstanceRequest, - dict, - ], -) -def test_delete_instance_rest_call_success(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = None - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - json_return_value = "" - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.delete_instance(request) - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_delete_instance_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_delete_instance" - ) as pre: - pre.assert_not_called() - pb_message = bigtable_instance_admin.DeleteInstanceRequest.pb( - bigtable_instance_admin.DeleteInstanceRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - request = bigtable_instance_admin.DeleteInstanceRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - - client.delete_instance( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - - -def test_create_cluster_rest_bad_request( - request_type=bigtable_instance_admin.CreateClusterRequest, -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.create_cluster(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.CreateClusterRequest, - dict, - ], -) -def test_create_cluster_rest_call_success(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} - request_init["cluster"] = { - "name": "name_value", - "location": "location_value", - "state": 1, - "serve_nodes": 1181, - "node_scaling_factor": 1, - "cluster_config": { - "cluster_autoscaling_config": { - "autoscaling_limits": { - "min_serve_nodes": 1600, - "max_serve_nodes": 1602, - }, - "autoscaling_targets": { - "cpu_utilization_percent": 2483, - "storage_utilization_gib_per_node": 3404, - }, - } - }, - "default_storage_type": 1, - "encryption_config": {"kms_key_name": "kms_key_name_value"}, - } - # The version of a generated dependency at test runtime may differ from the version used during generation. - # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 - - # Determine if the message type is proto-plus or protobuf - test_field = bigtable_instance_admin.CreateClusterRequest.meta.fields["cluster"] - - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. 
- message_fields = [] - - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") - - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields - - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - - subfields_not_in_runtime = [] - - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["cluster"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - { - "field": field, - "subfield": subfield, - "is_repeated": is_repeated, - } - ) - - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range(0, len(request_init["cluster"][field])): - del request_init["cluster"][field][i][subfield] - else: - del request_init["cluster"][field][subfield] - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.create_cluster(request) - - # Establish that the response is the type that we expect. 
- json_return_value = json_format.MessageToJson(return_value) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_create_cluster_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_create_cluster" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, - "post_create_cluster_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_create_cluster" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_instance_admin.CreateClusterRequest.pb( - bigtable_instance_admin.CreateClusterRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = json_format.MessageToJson(operations_pb2.Operation()) - req.return_value.content = return_value - - request = bigtable_instance_admin.CreateClusterRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - post_with_metadata.return_value = operations_pb2.Operation(), metadata - - client.create_cluster( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test_get_cluster_rest_bad_request( - request_type=bigtable_instance_admin.GetClusterRequest, -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/clusters/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.get_cluster(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.GetClusterRequest, - dict, - ], -) -def test_get_cluster_rest_call_success(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/clusters/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = instance.Cluster( - name="name_value", - location="location_value", - state=instance.Cluster.State.READY, - serve_nodes=1181, - node_scaling_factor=instance.Cluster.NodeScalingFactor.NODE_SCALING_FACTOR_1X, - default_storage_type=common.StorageType.SSD, - ) - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = instance.Cluster.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.get_cluster(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, instance.Cluster) - assert response.name == "name_value" - assert response.location == "location_value" - assert response.state == instance.Cluster.State.READY - assert response.serve_nodes == 1181 - assert ( - response.node_scaling_factor - == instance.Cluster.NodeScalingFactor.NODE_SCALING_FACTOR_1X - ) - assert response.default_storage_type == common.StorageType.SSD - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_cluster_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_get_cluster" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, - "post_get_cluster_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_get_cluster" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_instance_admin.GetClusterRequest.pb( - bigtable_instance_admin.GetClusterRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = instance.Cluster.to_json(instance.Cluster()) - req.return_value.content = return_value - - request = bigtable_instance_admin.GetClusterRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = instance.Cluster() - post_with_metadata.return_value = instance.Cluster(), metadata - - client.get_cluster( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test_list_clusters_rest_bad_request( - request_type=bigtable_instance_admin.ListClustersRequest, -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.list_clusters(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.ListClustersRequest, - dict, - ], -) -def test_list_clusters_rest_call_success(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListClustersResponse( - failed_locations=["failed_locations_value"], - next_page_token="next_page_token_value", - ) - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_instance_admin.ListClustersResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.list_clusters(request) - - assert response.raw_page is response - - # Establish that the response is the type that we expect. 
- assert isinstance(response, bigtable_instance_admin.ListClustersResponse) - assert response.failed_locations == ["failed_locations_value"] - assert response.next_page_token == "next_page_token_value" - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_clusters_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_list_clusters" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, - "post_list_clusters_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_list_clusters" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_instance_admin.ListClustersRequest.pb( - bigtable_instance_admin.ListClustersRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = bigtable_instance_admin.ListClustersResponse.to_json( - bigtable_instance_admin.ListClustersResponse() - ) - req.return_value.content = return_value - - request = bigtable_instance_admin.ListClustersRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable_instance_admin.ListClustersResponse() - post_with_metadata.return_value = ( - bigtable_instance_admin.ListClustersResponse(), - metadata, - ) - - client.list_clusters( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test_update_cluster_rest_bad_request(request_type=instance.Cluster): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/clusters/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.update_cluster(request) - - -@pytest.mark.parametrize( - "request_type", - [ - instance.Cluster, - dict, - ], -) -def test_update_cluster_rest_call_success(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/clusters/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.update_cluster(request) - - # Establish that the response is the type that we expect. - json_return_value = json_format.MessageToJson(return_value) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_update_cluster_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_update_cluster" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, - "post_update_cluster_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_update_cluster" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = instance.Cluster.pb(instance.Cluster()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = json_format.MessageToJson(operations_pb2.Operation()) - req.return_value.content = return_value - - request = instance.Cluster() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - post_with_metadata.return_value = operations_pb2.Operation(), metadata - - client.update_cluster( - request, - metadata=[ - ("key", "val"), - ("cephalopod", 
"squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test_partial_update_cluster_rest_bad_request( - request_type=bigtable_instance_admin.PartialUpdateClusterRequest, -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = { - "cluster": {"name": "projects/sample1/instances/sample2/clusters/sample3"} - } - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.partial_update_cluster(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.PartialUpdateClusterRequest, - dict, - ], -) -def test_partial_update_cluster_rest_call_success(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = { - "cluster": {"name": "projects/sample1/instances/sample2/clusters/sample3"} - } - request_init["cluster"] = { - "name": "projects/sample1/instances/sample2/clusters/sample3", - "location": "location_value", - "state": 1, - "serve_nodes": 1181, - "node_scaling_factor": 1, - "cluster_config": { - "cluster_autoscaling_config": { - "autoscaling_limits": { - "min_serve_nodes": 1600, - "max_serve_nodes": 1602, - }, - "autoscaling_targets": { - "cpu_utilization_percent": 2483, - "storage_utilization_gib_per_node": 3404, - }, - } - }, - "default_storage_type": 1, - "encryption_config": {"kms_key_name": "kms_key_name_value"}, - } - # The version of a generated dependency at test runtime may differ from the version used during generation. - # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 - - # Determine if the message type is proto-plus or protobuf - test_field = bigtable_instance_admin.PartialUpdateClusterRequest.meta.fields[ - "cluster" - ] - - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. 
- message_fields = [] - - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") - - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields - - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - - subfields_not_in_runtime = [] - - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["cluster"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - { - "field": field, - "subfield": subfield, - "is_repeated": is_repeated, - } - ) - - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range(0, len(request_init["cluster"][field])): - del request_init["cluster"][field][i][subfield] - else: - del request_init["cluster"][field][subfield] - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.partial_update_cluster(request) - - # Establish that the response is the type that we expect. 
- json_return_value = json_format.MessageToJson(return_value) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_partial_update_cluster_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_partial_update_cluster" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, - "post_partial_update_cluster_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_partial_update_cluster" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_instance_admin.PartialUpdateClusterRequest.pb( - bigtable_instance_admin.PartialUpdateClusterRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = json_format.MessageToJson(operations_pb2.Operation()) - req.return_value.content = return_value - - request = bigtable_instance_admin.PartialUpdateClusterRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - post_with_metadata.return_value = operations_pb2.Operation(), metadata - - client.partial_update_cluster( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test_delete_cluster_rest_bad_request( - request_type=bigtable_instance_admin.DeleteClusterRequest, -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/clusters/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.delete_cluster(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.DeleteClusterRequest, - dict, - ], -) -def test_delete_cluster_rest_call_success(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/clusters/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = None - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - json_return_value = "" - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.delete_cluster(request) - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_delete_cluster_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_delete_cluster" - ) as pre: - pre.assert_not_called() - pb_message = bigtable_instance_admin.DeleteClusterRequest.pb( - bigtable_instance_admin.DeleteClusterRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - request = bigtable_instance_admin.DeleteClusterRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - - client.delete_cluster( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - - -def test_create_app_profile_rest_bad_request( - request_type=bigtable_instance_admin.CreateAppProfileRequest, -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.create_app_profile(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.CreateAppProfileRequest, - dict, - ], -) -def test_create_app_profile_rest_call_success(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} - request_init["app_profile"] = { - "name": "name_value", - "etag": "etag_value", - "description": "description_value", - "multi_cluster_routing_use_any": { - "cluster_ids": ["cluster_ids_value1", "cluster_ids_value2"], - "row_affinity": {}, - }, - "single_cluster_routing": { - "cluster_id": "cluster_id_value", - "allow_transactional_writes": True, - }, - "priority": 1, - "standard_isolation": {"priority": 1}, - "data_boost_isolation_read_only": {"compute_billing_owner": 1}, - } - # The version of a generated dependency at test runtime may differ from the version used during generation. - # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 - - # Determine if the message type is proto-plus or protobuf - test_field = bigtable_instance_admin.CreateAppProfileRequest.meta.fields[ - "app_profile" - ] - - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. 
- message_fields = [] - - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") - - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields - - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - - subfields_not_in_runtime = [] - - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["app_profile"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - { - "field": field, - "subfield": subfield, - "is_repeated": is_repeated, - } - ) - - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range(0, len(request_init["app_profile"][field])): - del request_init["app_profile"][field][i][subfield] - else: - del request_init["app_profile"][field][subfield] - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = instance.AppProfile( - name="name_value", - etag="etag_value", - description="description_value", - priority=instance.AppProfile.Priority.PRIORITY_LOW, - ) - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = instance.AppProfile.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.create_app_profile(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, instance.AppProfile) - assert response.name == "name_value" - assert response.etag == "etag_value" - assert response.description == "description_value" - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_create_app_profile_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_create_app_profile" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, - "post_create_app_profile_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_create_app_profile" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_instance_admin.CreateAppProfileRequest.pb( - bigtable_instance_admin.CreateAppProfileRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = instance.AppProfile.to_json(instance.AppProfile()) - req.return_value.content = return_value - - request = bigtable_instance_admin.CreateAppProfileRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = instance.AppProfile() - post_with_metadata.return_value = instance.AppProfile(), metadata - - client.create_app_profile( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test_get_app_profile_rest_bad_request( - request_type=bigtable_instance_admin.GetAppProfileRequest, -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/appProfiles/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.get_app_profile(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.GetAppProfileRequest, - dict, - ], -) -def test_get_app_profile_rest_call_success(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/appProfiles/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = instance.AppProfile( - name="name_value", - etag="etag_value", - description="description_value", - priority=instance.AppProfile.Priority.PRIORITY_LOW, - ) - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = instance.AppProfile.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.get_app_profile(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, instance.AppProfile) - assert response.name == "name_value" - assert response.etag == "etag_value" - assert response.description == "description_value" - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_app_profile_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_get_app_profile" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, - "post_get_app_profile_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_get_app_profile" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_instance_admin.GetAppProfileRequest.pb( - bigtable_instance_admin.GetAppProfileRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = instance.AppProfile.to_json(instance.AppProfile()) - req.return_value.content = return_value - - request = bigtable_instance_admin.GetAppProfileRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = instance.AppProfile() - post_with_metadata.return_value = instance.AppProfile(), metadata - - client.get_app_profile( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test_list_app_profiles_rest_bad_request( - request_type=bigtable_instance_admin.ListAppProfilesRequest, -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.list_app_profiles(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.ListAppProfilesRequest, - dict, - ], -) -def test_list_app_profiles_rest_call_success(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListAppProfilesResponse( - next_page_token="next_page_token_value", - failed_locations=["failed_locations_value"], - ) - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_instance_admin.ListAppProfilesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.list_app_profiles(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListAppProfilesPager) - assert response.next_page_token == "next_page_token_value" - assert response.failed_locations == ["failed_locations_value"] - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_app_profiles_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_list_app_profiles" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, - "post_list_app_profiles_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_list_app_profiles" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_instance_admin.ListAppProfilesRequest.pb( - bigtable_instance_admin.ListAppProfilesRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = bigtable_instance_admin.ListAppProfilesResponse.to_json( - bigtable_instance_admin.ListAppProfilesResponse() - ) - req.return_value.content = return_value - - request = bigtable_instance_admin.ListAppProfilesRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable_instance_admin.ListAppProfilesResponse() - post_with_metadata.return_value = ( - bigtable_instance_admin.ListAppProfilesResponse(), - metadata, - ) - - client.list_app_profiles( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test_update_app_profile_rest_bad_request( - request_type=bigtable_instance_admin.UpdateAppProfileRequest, -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = { - "app_profile": { - "name": "projects/sample1/instances/sample2/appProfiles/sample3" - } - } - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.update_app_profile(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.UpdateAppProfileRequest, - dict, - ], -) -def test_update_app_profile_rest_call_success(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = { - "app_profile": { - "name": "projects/sample1/instances/sample2/appProfiles/sample3" - } - } - request_init["app_profile"] = { - "name": "projects/sample1/instances/sample2/appProfiles/sample3", - "etag": "etag_value", - "description": "description_value", - "multi_cluster_routing_use_any": { - "cluster_ids": ["cluster_ids_value1", "cluster_ids_value2"], - "row_affinity": {}, - }, - "single_cluster_routing": { - "cluster_id": "cluster_id_value", - "allow_transactional_writes": True, - }, - "priority": 1, - "standard_isolation": {"priority": 1}, - "data_boost_isolation_read_only": {"compute_billing_owner": 1}, - } - # The version of a generated dependency at test runtime may differ from the version used during generation. - # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 - - # Determine if the message type is proto-plus or protobuf - test_field = bigtable_instance_admin.UpdateAppProfileRequest.meta.fields[ - "app_profile" - ] - - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. 
- message_fields = [] - - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") - - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields - - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - - subfields_not_in_runtime = [] - - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["app_profile"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - { - "field": field, - "subfield": subfield, - "is_repeated": is_repeated, - } - ) - - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range(0, len(request_init["app_profile"][field])): - del request_init["app_profile"][field][i][subfield] - else: - del request_init["app_profile"][field][subfield] - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.update_app_profile(request) - - # Establish that the response is the type that we expect. 
- json_return_value = json_format.MessageToJson(return_value) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_update_app_profile_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_update_app_profile" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, - "post_update_app_profile_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_update_app_profile" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_instance_admin.UpdateAppProfileRequest.pb( - bigtable_instance_admin.UpdateAppProfileRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = json_format.MessageToJson(operations_pb2.Operation()) - req.return_value.content = return_value - - request = bigtable_instance_admin.UpdateAppProfileRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - post_with_metadata.return_value = operations_pb2.Operation(), metadata - - client.update_app_profile( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test_delete_app_profile_rest_bad_request( - request_type=bigtable_instance_admin.DeleteAppProfileRequest, -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/appProfiles/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.delete_app_profile(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.DeleteAppProfileRequest, - dict, - ], -) -def test_delete_app_profile_rest_call_success(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/appProfiles/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = None - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - json_return_value = "" - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.delete_app_profile(request) - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_delete_app_profile_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_delete_app_profile" - ) as pre: - pre.assert_not_called() - pb_message = bigtable_instance_admin.DeleteAppProfileRequest.pb( - bigtable_instance_admin.DeleteAppProfileRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - request = bigtable_instance_admin.DeleteAppProfileRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - - client.delete_app_profile( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - - -def test_get_iam_policy_rest_bad_request( - request_type=iam_policy_pb2.GetIamPolicyRequest, -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"resource": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.get_iam_policy(request) - - -@pytest.mark.parametrize( - "request_type", - [ - iam_policy_pb2.GetIamPolicyRequest, - dict, - ], -) -def test_get_iam_policy_rest_call_success(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"resource": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy( - version=774, - etag=b"etag_blob", - ) - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.get_iam_policy(request) - - # Establish that the response is the type that we expect. - assert isinstance(response, policy_pb2.Policy) - assert response.version == 774 - assert response.etag == b"etag_blob" - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_iam_policy_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_get_iam_policy" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, - "post_get_iam_policy_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_get_iam_policy" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = iam_policy_pb2.GetIamPolicyRequest() - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = json_format.MessageToJson(policy_pb2.Policy()) - req.return_value.content = return_value - - request = iam_policy_pb2.GetIamPolicyRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = policy_pb2.Policy() - post_with_metadata.return_value = policy_pb2.Policy(), metadata - - client.get_iam_policy( - request, - metadata=[ - ("key", "val"), - ("cephalopod", 
"squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test_set_iam_policy_rest_bad_request( - request_type=iam_policy_pb2.SetIamPolicyRequest, -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"resource": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.set_iam_policy(request) - - -@pytest.mark.parametrize( - "request_type", - [ - iam_policy_pb2.SetIamPolicyRequest, - dict, - ], -) -def test_set_iam_policy_rest_call_success(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"resource": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy( - version=774, - etag=b"etag_blob", - ) - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.set_iam_policy(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, policy_pb2.Policy) - assert response.version == 774 - assert response.etag == b"etag_blob" - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_set_iam_policy_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_set_iam_policy" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, - "post_set_iam_policy_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_set_iam_policy" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = iam_policy_pb2.SetIamPolicyRequest() - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = json_format.MessageToJson(policy_pb2.Policy()) - req.return_value.content = return_value - - request = iam_policy_pb2.SetIamPolicyRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = policy_pb2.Policy() - post_with_metadata.return_value = policy_pb2.Policy(), metadata - - client.set_iam_policy( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test_test_iam_permissions_rest_bad_request( - request_type=iam_policy_pb2.TestIamPermissionsRequest, -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"resource": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.test_iam_permissions(request) - - -@pytest.mark.parametrize( - "request_type", - [ - iam_policy_pb2.TestIamPermissionsRequest, - dict, - ], -) -def test_test_iam_permissions_rest_call_success(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"resource": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = iam_policy_pb2.TestIamPermissionsResponse( - permissions=["permissions_value"], - ) - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.test_iam_permissions(request) - - # Establish that the response is the type that we expect. - assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) - assert response.permissions == ["permissions_value"] - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_test_iam_permissions_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_test_iam_permissions" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, - "post_test_iam_permissions_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_test_iam_permissions" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = iam_policy_pb2.TestIamPermissionsRequest() - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = json_format.MessageToJson( - iam_policy_pb2.TestIamPermissionsResponse() - ) - req.return_value.content = return_value - - request = iam_policy_pb2.TestIamPermissionsRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = iam_policy_pb2.TestIamPermissionsResponse() - post_with_metadata.return_value = ( - iam_policy_pb2.TestIamPermissionsResponse(), - metadata, - ) - - client.test_iam_permissions( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test_list_hot_tablets_rest_bad_request( - request_type=bigtable_instance_admin.ListHotTabletsRequest, -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.list_hot_tablets(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.ListHotTabletsRequest, - dict, - ], -) -def test_list_hot_tablets_rest_call_success(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListHotTabletsResponse( - next_page_token="next_page_token_value", - ) - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_instance_admin.ListHotTabletsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.list_hot_tablets(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListHotTabletsPager) - assert response.next_page_token == "next_page_token_value" - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_hot_tablets_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_list_hot_tablets" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, - "post_list_hot_tablets_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_list_hot_tablets" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_instance_admin.ListHotTabletsRequest.pb( - bigtable_instance_admin.ListHotTabletsRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = bigtable_instance_admin.ListHotTabletsResponse.to_json( - bigtable_instance_admin.ListHotTabletsResponse() - ) - req.return_value.content = return_value - - request = bigtable_instance_admin.ListHotTabletsRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable_instance_admin.ListHotTabletsResponse() - post_with_metadata.return_value = ( - bigtable_instance_admin.ListHotTabletsResponse(), - metadata, - ) - - client.list_hot_tablets( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test_create_logical_view_rest_bad_request( - request_type=bigtable_instance_admin.CreateLogicalViewRequest, -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.create_logical_view(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.CreateLogicalViewRequest, - dict, - ], -) -def test_create_logical_view_rest_call_success(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} - request_init["logical_view"] = { - "name": "name_value", - "query": "query_value", - "etag": "etag_value", - "deletion_protection": True, - } - # The version of a generated dependency at test runtime may differ from the version used during generation. - # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 - - # Determine if the message type is proto-plus or protobuf - test_field = bigtable_instance_admin.CreateLogicalViewRequest.meta.fields[ - "logical_view" - ] - - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. - message_fields = [] - - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") - - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields - - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - - subfields_not_in_runtime = [] - - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["logical_view"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - { - "field": field, - "subfield": subfield, - "is_repeated": is_repeated, - } - ) - - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range(0, 
len(request_init["logical_view"][field])): - del request_init["logical_view"][field][i][subfield] - else: - del request_init["logical_view"][field][subfield] - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.create_logical_view(request) - - # Establish that the response is the type that we expect. - json_return_value = json_format.MessageToJson(return_value) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_create_logical_view_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_create_logical_view" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, - "post_create_logical_view_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_create_logical_view" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_instance_admin.CreateLogicalViewRequest.pb( - bigtable_instance_admin.CreateLogicalViewRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = json_format.MessageToJson(operations_pb2.Operation()) - req.return_value.content = return_value - - request = bigtable_instance_admin.CreateLogicalViewRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - post_with_metadata.return_value = operations_pb2.Operation(), metadata - - client.create_logical_view( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test_get_logical_view_rest_bad_request( - request_type=bigtable_instance_admin.GetLogicalViewRequest, -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/logicalViews/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and 
fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.get_logical_view(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.GetLogicalViewRequest, - dict, - ], -) -def test_get_logical_view_rest_call_success(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/logicalViews/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = instance.LogicalView( - name="name_value", - query="query_value", - etag="etag_value", - deletion_protection=True, - ) - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = instance.LogicalView.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.get_logical_view(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, instance.LogicalView) - assert response.name == "name_value" - assert response.query == "query_value" - assert response.etag == "etag_value" - assert response.deletion_protection is True - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_logical_view_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_get_logical_view" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, - "post_get_logical_view_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_get_logical_view" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_instance_admin.GetLogicalViewRequest.pb( - bigtable_instance_admin.GetLogicalViewRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = instance.LogicalView.to_json(instance.LogicalView()) - req.return_value.content = return_value - - request = bigtable_instance_admin.GetLogicalViewRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = instance.LogicalView() - post_with_metadata.return_value = instance.LogicalView(), metadata - - client.get_logical_view( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test_list_logical_views_rest_bad_request( - request_type=bigtable_instance_admin.ListLogicalViewsRequest, -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.list_logical_views(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.ListLogicalViewsRequest, - dict, - ], -) -def test_list_logical_views_rest_call_success(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListLogicalViewsResponse( - next_page_token="next_page_token_value", - ) - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_instance_admin.ListLogicalViewsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.list_logical_views(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListLogicalViewsPager) - assert response.next_page_token == "next_page_token_value" - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_logical_views_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_list_logical_views" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, - "post_list_logical_views_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_list_logical_views" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_instance_admin.ListLogicalViewsRequest.pb( - bigtable_instance_admin.ListLogicalViewsRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = bigtable_instance_admin.ListLogicalViewsResponse.to_json( - bigtable_instance_admin.ListLogicalViewsResponse() - ) - req.return_value.content = return_value - - request = bigtable_instance_admin.ListLogicalViewsRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable_instance_admin.ListLogicalViewsResponse() - post_with_metadata.return_value = ( - bigtable_instance_admin.ListLogicalViewsResponse(), - metadata, - ) - - client.list_logical_views( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test_update_logical_view_rest_bad_request( - request_type=bigtable_instance_admin.UpdateLogicalViewRequest, -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = { - "logical_view": { - "name": "projects/sample1/instances/sample2/logicalViews/sample3" - } - } - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.update_logical_view(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.UpdateLogicalViewRequest, - dict, - ], -) -def test_update_logical_view_rest_call_success(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = { - "logical_view": { - "name": "projects/sample1/instances/sample2/logicalViews/sample3" - } - } - request_init["logical_view"] = { - "name": "projects/sample1/instances/sample2/logicalViews/sample3", - "query": "query_value", - "etag": "etag_value", - "deletion_protection": True, - } - # The version of a generated dependency at test runtime may differ from the version used during generation. - # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 - - # Determine if the message type is proto-plus or protobuf - test_field = bigtable_instance_admin.UpdateLogicalViewRequest.meta.fields[ - "logical_view" - ] - - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. - message_fields = [] - - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") - - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields - - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - - subfields_not_in_runtime = [] - - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["logical_view"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - { - "field": field, - "subfield": subfield, - "is_repeated": is_repeated, - } - ) - - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = 
subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range(0, len(request_init["logical_view"][field])): - del request_init["logical_view"][field][i][subfield] - else: - del request_init["logical_view"][field][subfield] - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.update_logical_view(request) - - # Establish that the response is the type that we expect. - json_return_value = json_format.MessageToJson(return_value) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_update_logical_view_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_update_logical_view" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, - "post_update_logical_view_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_update_logical_view" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_instance_admin.UpdateLogicalViewRequest.pb( - bigtable_instance_admin.UpdateLogicalViewRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = json_format.MessageToJson(operations_pb2.Operation()) - req.return_value.content = return_value - - request = bigtable_instance_admin.UpdateLogicalViewRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - post_with_metadata.return_value = operations_pb2.Operation(), metadata - - client.update_logical_view( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test_delete_logical_view_rest_bad_request( - request_type=bigtable_instance_admin.DeleteLogicalViewRequest, -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"name": 
"projects/sample1/instances/sample2/logicalViews/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.delete_logical_view(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.DeleteLogicalViewRequest, - dict, - ], -) -def test_delete_logical_view_rest_call_success(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/logicalViews/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = None - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - json_return_value = "" - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.delete_logical_view(request) - - # Establish that the response is the type that we expect. 
- assert response is None - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_delete_logical_view_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_delete_logical_view" - ) as pre: - pre.assert_not_called() - pb_message = bigtable_instance_admin.DeleteLogicalViewRequest.pb( - bigtable_instance_admin.DeleteLogicalViewRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - request = bigtable_instance_admin.DeleteLogicalViewRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - - client.delete_logical_view( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - - -def test_create_materialized_view_rest_bad_request( - request_type=bigtable_instance_admin.CreateMaterializedViewRequest, -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.create_materialized_view(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.CreateMaterializedViewRequest, - dict, - ], -) -def test_create_materialized_view_rest_call_success(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} - request_init["materialized_view"] = { - "name": "name_value", - "query": "query_value", - "etag": "etag_value", - "deletion_protection": True, - } - # The version of a generated dependency at test runtime may differ from the version used during generation. - # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 - - # Determine if the message type is proto-plus or protobuf - test_field = bigtable_instance_admin.CreateMaterializedViewRequest.meta.fields[ - "materialized_view" - ] - - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. 
- # If the field is not a composite type, return an empty list. - message_fields = [] - - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") - - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields - - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - - subfields_not_in_runtime = [] - - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["materialized_view"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - { - "field": field, - "subfield": subfield, - "is_repeated": is_repeated, - } - ) - - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range(0, len(request_init["materialized_view"][field])): - del request_init["materialized_view"][field][i][subfield] - else: - del request_init["materialized_view"][field][subfield] - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.create_materialized_view(request) - - # Establish that the response is the type that we expect. 
- json_return_value = json_format.MessageToJson(return_value) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_create_materialized_view_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_create_materialized_view" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, - "post_create_materialized_view_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_create_materialized_view" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_instance_admin.CreateMaterializedViewRequest.pb( - bigtable_instance_admin.CreateMaterializedViewRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = json_format.MessageToJson(operations_pb2.Operation()) - req.return_value.content = return_value - - request = bigtable_instance_admin.CreateMaterializedViewRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - post_with_metadata.return_value = operations_pb2.Operation(), metadata - - client.create_materialized_view( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test_get_materialized_view_rest_bad_request( - request_type=bigtable_instance_admin.GetMaterializedViewRequest, -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/instances/sample2/materializedViews/sample3" - } - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.get_materialized_view(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.GetMaterializedViewRequest, - dict, - ], -) -def test_get_materialized_view_rest_call_success(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/instances/sample2/materializedViews/sample3" - } - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = instance.MaterializedView( - name="name_value", - query="query_value", - etag="etag_value", - deletion_protection=True, - ) - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = instance.MaterializedView.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.get_materialized_view(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, instance.MaterializedView) - assert response.name == "name_value" - assert response.query == "query_value" - assert response.etag == "etag_value" - assert response.deletion_protection is True - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_materialized_view_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_get_materialized_view" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, - "post_get_materialized_view_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_get_materialized_view" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_instance_admin.GetMaterializedViewRequest.pb( - bigtable_instance_admin.GetMaterializedViewRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = instance.MaterializedView.to_json(instance.MaterializedView()) - req.return_value.content = return_value - - request = bigtable_instance_admin.GetMaterializedViewRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = instance.MaterializedView() - post_with_metadata.return_value = instance.MaterializedView(), metadata - - client.get_materialized_view( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test_list_materialized_views_rest_bad_request( - request_type=bigtable_instance_admin.ListMaterializedViewsRequest, -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.list_materialized_views(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.ListMaterializedViewsRequest, - dict, - ], -) -def test_list_materialized_views_rest_call_success(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListMaterializedViewsResponse( - next_page_token="next_page_token_value", - ) - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_instance_admin.ListMaterializedViewsResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.list_materialized_views(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListMaterializedViewsPager) - assert response.next_page_token == "next_page_token_value" - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_materialized_views_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_list_materialized_views" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, - "post_list_materialized_views_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_list_materialized_views" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_instance_admin.ListMaterializedViewsRequest.pb( - bigtable_instance_admin.ListMaterializedViewsRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = bigtable_instance_admin.ListMaterializedViewsResponse.to_json( - bigtable_instance_admin.ListMaterializedViewsResponse() - ) - req.return_value.content = return_value - - request = bigtable_instance_admin.ListMaterializedViewsRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable_instance_admin.ListMaterializedViewsResponse() - post_with_metadata.return_value = ( - bigtable_instance_admin.ListMaterializedViewsResponse(), - metadata, - ) - - client.list_materialized_views( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test_update_materialized_view_rest_bad_request( - request_type=bigtable_instance_admin.UpdateMaterializedViewRequest, -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = { - "materialized_view": { - "name": "projects/sample1/instances/sample2/materializedViews/sample3" - } - } - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.update_materialized_view(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.UpdateMaterializedViewRequest, - dict, - ], -) -def test_update_materialized_view_rest_call_success(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = { - "materialized_view": { - "name": "projects/sample1/instances/sample2/materializedViews/sample3" - } - } - request_init["materialized_view"] = { - "name": "projects/sample1/instances/sample2/materializedViews/sample3", - "query": "query_value", - "etag": "etag_value", - "deletion_protection": True, - } - # The version of a generated dependency at test runtime may differ from the version used during generation. - # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 - - # Determine if the message type is proto-plus or protobuf - test_field = bigtable_instance_admin.UpdateMaterializedViewRequest.meta.fields[ - "materialized_view" - ] - - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. 
- message_fields = [] - - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") - - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields - - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - - subfields_not_in_runtime = [] - - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["materialized_view"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - { - "field": field, - "subfield": subfield, - "is_repeated": is_repeated, - } - ) - - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range(0, len(request_init["materialized_view"][field])): - del request_init["materialized_view"][field][i][subfield] - else: - del request_init["materialized_view"][field][subfield] - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.update_materialized_view(request) - - # Establish that the response is the type that we expect. 
- json_return_value = json_format.MessageToJson(return_value) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_update_materialized_view_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_update_materialized_view" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, - "post_update_materialized_view_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_update_materialized_view" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_instance_admin.UpdateMaterializedViewRequest.pb( - bigtable_instance_admin.UpdateMaterializedViewRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = json_format.MessageToJson(operations_pb2.Operation()) - req.return_value.content = return_value - - request = bigtable_instance_admin.UpdateMaterializedViewRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - post_with_metadata.return_value = operations_pb2.Operation(), metadata - - client.update_materialized_view( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test_delete_materialized_view_rest_bad_request( - request_type=bigtable_instance_admin.DeleteMaterializedViewRequest, -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/instances/sample2/materializedViews/sample3" - } - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.delete_materialized_view(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.DeleteMaterializedViewRequest, - dict, - ], -) -def test_delete_materialized_view_rest_call_success(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/instances/sample2/materializedViews/sample3" - } - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = None - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - json_return_value = "" - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.delete_materialized_view(request) - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_delete_materialized_view_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_delete_materialized_view" - ) as pre: - pre.assert_not_called() - pb_message = bigtable_instance_admin.DeleteMaterializedViewRequest.pb( - bigtable_instance_admin.DeleteMaterializedViewRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - request = bigtable_instance_admin.DeleteMaterializedViewRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - - client.delete_materialized_view( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - - -def test_initialize_client_w_rest(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - assert client is not None - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. 
-def test_create_instance_empty_call_rest(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.create_instance), "__call__") as call: - client.create_instance(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.CreateInstanceRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_get_instance_empty_call_rest(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.get_instance), "__call__") as call: - client.get_instance(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.GetInstanceRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_list_instances_empty_call_rest(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.list_instances), "__call__") as call: - client.list_instances(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.ListInstancesRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_update_instance_empty_call_rest(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.update_instance), "__call__") as call: - client.update_instance(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = instance.Instance() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_partial_update_instance_empty_call_rest(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.partial_update_instance), "__call__" - ) as call: - client.partial_update_instance(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.PartialUpdateInstanceRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. 
-def test_delete_instance_empty_call_rest(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: - client.delete_instance(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.DeleteInstanceRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_create_cluster_empty_call_rest(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: - client.create_cluster(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.CreateClusterRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_get_cluster_empty_call_rest(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: - client.get_cluster(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.GetClusterRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_list_clusters_empty_call_rest(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: - client.list_clusters(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.ListClustersRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_update_cluster_empty_call_rest(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.update_cluster), "__call__") as call: - client.update_cluster(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = instance.Cluster() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. 
-def test_partial_update_cluster_empty_call_rest(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.partial_update_cluster), "__call__" - ) as call: - client.partial_update_cluster(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.PartialUpdateClusterRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_delete_cluster_empty_call_rest(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: - client.delete_cluster(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.DeleteClusterRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_create_app_profile_empty_call_rest(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.create_app_profile), "__call__" - ) as call: - client.create_app_profile(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.CreateAppProfileRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_get_app_profile_empty_call_rest(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call: - client.get_app_profile(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.GetAppProfileRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_list_app_profiles_empty_call_rest(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.list_app_profiles), "__call__" - ) as call: - client.list_app_profiles(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.ListAppProfilesRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. 
-def test_update_app_profile_empty_call_rest(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.update_app_profile), "__call__" - ) as call: - client.update_app_profile(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.UpdateAppProfileRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_delete_app_profile_empty_call_rest(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.delete_app_profile), "__call__" - ) as call: - client.delete_app_profile(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.DeleteAppProfileRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_get_iam_policy_empty_call_rest(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: - client.get_iam_policy(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = iam_policy_pb2.GetIamPolicyRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_set_iam_policy_empty_call_rest(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: - client.set_iam_policy(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = iam_policy_pb2.SetIamPolicyRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_test_iam_permissions_empty_call_rest(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), "__call__" - ) as call: - client.test_iam_permissions(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = iam_policy_pb2.TestIamPermissionsRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. 
-def test_list_hot_tablets_empty_call_rest(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.list_hot_tablets), "__call__") as call: - client.list_hot_tablets(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.ListHotTabletsRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_create_logical_view_empty_call_rest(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.create_logical_view), "__call__" - ) as call: - client.create_logical_view(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.CreateLogicalViewRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_get_logical_view_empty_call_rest(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.get_logical_view), "__call__") as call: - client.get_logical_view(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.GetLogicalViewRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_list_logical_views_empty_call_rest(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.list_logical_views), "__call__" - ) as call: - client.list_logical_views(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.ListLogicalViewsRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_update_logical_view_empty_call_rest(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.update_logical_view), "__call__" - ) as call: - client.update_logical_view(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.UpdateLogicalViewRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. 
-def test_delete_logical_view_empty_call_rest(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.delete_logical_view), "__call__" - ) as call: - client.delete_logical_view(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.DeleteLogicalViewRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_create_materialized_view_empty_call_rest(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.create_materialized_view), "__call__" - ) as call: - client.create_materialized_view(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.CreateMaterializedViewRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_get_materialized_view_empty_call_rest(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.get_materialized_view), "__call__" - ) as call: - client.get_materialized_view(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.GetMaterializedViewRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_list_materialized_views_empty_call_rest(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.list_materialized_views), "__call__" - ) as call: - client.list_materialized_views(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.ListMaterializedViewsRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_update_materialized_view_empty_call_rest(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.update_materialized_view), "__call__" - ) as call: - client.update_materialized_view(request=None) - - # Establish that the underlying stub method was called. 
- call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.UpdateMaterializedViewRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_delete_materialized_view_empty_call_rest(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.delete_materialized_view), "__call__" - ) as call: - client.delete_materialized_view(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.DeleteMaterializedViewRequest() - - assert args[0] == request_msg - - -def test_bigtable_instance_admin_rest_lro_client(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - transport = client.transport - - # Ensure that we have an api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.AbstractOperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.BigtableInstanceAdminGrpcTransport, - ) - - -def test_bigtable_instance_admin_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.BigtableInstanceAdminTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json", - ) - - -def test_bigtable_instance_admin_base_transport(): - # Instantiate the base transport. - with mock.patch( - "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.BigtableInstanceAdminTransport.__init__" - ) as Transport: - Transport.return_value = None - transport = transports.BigtableInstanceAdminTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. 
- methods = ( - "create_instance", - "get_instance", - "list_instances", - "update_instance", - "partial_update_instance", - "delete_instance", - "create_cluster", - "get_cluster", - "list_clusters", - "update_cluster", - "partial_update_cluster", - "delete_cluster", - "create_app_profile", - "get_app_profile", - "list_app_profiles", - "update_app_profile", - "delete_app_profile", - "get_iam_policy", - "set_iam_policy", - "test_iam_permissions", - "list_hot_tablets", - "create_logical_view", - "get_logical_view", - "list_logical_views", - "update_logical_view", - "delete_logical_view", - "create_materialized_view", - "get_materialized_view", - "list_materialized_views", - "update_materialized_view", - "delete_materialized_view", - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - with pytest.raises(NotImplementedError): - transport.close() - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - # Catch all for all remaining methods and properties - remainder = [ - "kind", - ] - for r in remainder: - with pytest.raises(NotImplementedError): - getattr(transport, r)() - - -def test_bigtable_instance_admin_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object( - google.auth, "load_credentials_from_file", autospec=True - ) as load_creds, mock.patch( - "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.BigtableInstanceAdminTransport._prep_wrapped_messages" - ) as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.BigtableInstanceAdminTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with( - "credentials.json", - scopes=None, - default_scopes=( - "https://www.googleapis.com/auth/bigtable.admin", - "https://www.googleapis.com/auth/bigtable.admin.cluster", - "https://www.googleapis.com/auth/bigtable.admin.instance", - "https://www.googleapis.com/auth/cloud-bigtable.admin", - "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - ), - quota_project_id="octopus", - ) - - -def test_bigtable_instance_admin_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch( - "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.BigtableInstanceAdminTransport._prep_wrapped_messages" - ) as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.BigtableInstanceAdminTransport() - adc.assert_called_once() - - -def test_bigtable_instance_admin_auth_adc(): - # If no credentials are provided, we should use ADC credentials. 
- with mock.patch.object(google.auth, "default", autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - BigtableInstanceAdminClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - "https://www.googleapis.com/auth/bigtable.admin", - "https://www.googleapis.com/auth/bigtable.admin.cluster", - "https://www.googleapis.com/auth/bigtable.admin.instance", - "https://www.googleapis.com/auth/cloud-bigtable.admin", - "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - ), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.BigtableInstanceAdminGrpcTransport, - transports.BigtableInstanceAdminGrpcAsyncIOTransport, - ], -) -def test_bigtable_instance_admin_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( - "https://www.googleapis.com/auth/bigtable.admin", - "https://www.googleapis.com/auth/bigtable.admin.cluster", - "https://www.googleapis.com/auth/bigtable.admin.instance", - "https://www.googleapis.com/auth/cloud-bigtable.admin", - "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - ), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.BigtableInstanceAdminGrpcTransport, - transports.BigtableInstanceAdminGrpcAsyncIOTransport, - transports.BigtableInstanceAdminRestTransport, - ], -) -def test_bigtable_instance_admin_transport_auth_gdch_credentials(transport_class): - host = "https://language.com" - api_audience_tests = [None, "https://language2.com"] - api_audience_expect = [host, "https://language2.com"] - for t, e in zip(api_audience_tests, api_audience_expect): - with mock.patch.object(google.auth, "default", autospec=True) as adc: - gdch_mock = mock.MagicMock() - type(gdch_mock).with_gdch_audience = mock.PropertyMock( - return_value=gdch_mock - ) - adc.return_value = (gdch_mock, None) - transport_class(host=host, api_audience=t) - gdch_mock.with_gdch_audience.assert_called_once_with(e) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.BigtableInstanceAdminGrpcTransport, grpc_helpers), - (transports.BigtableInstanceAdminGrpcAsyncIOTransport, grpc_helpers_async), - ], -) -def test_bigtable_instance_admin_transport_create_channel( - transport_class, grpc_helpers -): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object( - google.auth, "default", autospec=True - ) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - - create_channel.assert_called_with( - "bigtableadmin.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - "https://www.googleapis.com/auth/bigtable.admin", - "https://www.googleapis.com/auth/bigtable.admin.cluster", - "https://www.googleapis.com/auth/bigtable.admin.instance", - "https://www.googleapis.com/auth/cloud-bigtable.admin", - "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - ), - scopes=["1", "2"], - default_host="bigtableadmin.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.BigtableInstanceAdminGrpcTransport, - transports.BigtableInstanceAdminGrpcAsyncIOTransport, - ], -) -def test_bigtable_instance_admin_grpc_transport_client_cert_source_for_mtls( - transport_class, -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. - with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds, - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. 
- with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback, - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, private_key=expected_key - ) - - -def test_bigtable_instance_admin_http_transport_client_cert_source_for_mtls(): - cred = ga_credentials.AnonymousCredentials() - with mock.patch( - "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" - ) as mock_configure_mtls_channel: - transports.BigtableInstanceAdminRestTransport( - credentials=cred, client_cert_source_for_mtls=client_cert_source_callback - ) - mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) - - -@pytest.mark.parametrize( - "transport_name", - [ - "grpc", - "grpc_asyncio", - "rest", - ], -) -def test_bigtable_instance_admin_host_no_port(transport_name): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions( - api_endpoint="bigtableadmin.googleapis.com" - ), - transport=transport_name, - ) - assert client.transport._host == ( - "bigtableadmin.googleapis.com:443" - if transport_name in ["grpc", "grpc_asyncio"] - else "https://bigtableadmin.googleapis.com" - ) - - -@pytest.mark.parametrize( - "transport_name", - [ - "grpc", - "grpc_asyncio", - "rest", - ], -) -def test_bigtable_instance_admin_host_with_port(transport_name): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions( - api_endpoint="bigtableadmin.googleapis.com:8000" - ), - transport=transport_name, - ) - assert client.transport._host == ( - "bigtableadmin.googleapis.com:8000" - if transport_name in ["grpc", "grpc_asyncio"] - else "https://bigtableadmin.googleapis.com:8000" - ) - - -@pytest.mark.parametrize( - "transport_name", - [ - "rest", - ], -) -def test_bigtable_instance_admin_client_transport_session_collision(transport_name): - creds1 = ga_credentials.AnonymousCredentials() - creds2 = ga_credentials.AnonymousCredentials() - client1 = BigtableInstanceAdminClient( - credentials=creds1, - transport=transport_name, - ) - client2 = BigtableInstanceAdminClient( - credentials=creds2, - transport=transport_name, - ) - session1 = client1.transport.create_instance._session - session2 = client2.transport.create_instance._session - assert session1 != session2 - session1 = client1.transport.get_instance._session - session2 = client2.transport.get_instance._session - assert session1 != session2 - session1 = client1.transport.list_instances._session - session2 = client2.transport.list_instances._session - assert session1 != session2 - session1 = client1.transport.update_instance._session - session2 = client2.transport.update_instance._session - assert session1 != session2 - session1 = client1.transport.partial_update_instance._session - session2 = client2.transport.partial_update_instance._session - assert session1 != session2 - session1 = client1.transport.delete_instance._session - session2 = client2.transport.delete_instance._session - assert session1 != session2 - session1 = client1.transport.create_cluster._session - session2 = client2.transport.create_cluster._session - assert session1 != session2 - session1 = client1.transport.get_cluster._session - session2 = 
client2.transport.get_cluster._session - assert session1 != session2 - session1 = client1.transport.list_clusters._session - session2 = client2.transport.list_clusters._session - assert session1 != session2 - session1 = client1.transport.update_cluster._session - session2 = client2.transport.update_cluster._session - assert session1 != session2 - session1 = client1.transport.partial_update_cluster._session - session2 = client2.transport.partial_update_cluster._session - assert session1 != session2 - session1 = client1.transport.delete_cluster._session - session2 = client2.transport.delete_cluster._session - assert session1 != session2 - session1 = client1.transport.create_app_profile._session - session2 = client2.transport.create_app_profile._session - assert session1 != session2 - session1 = client1.transport.get_app_profile._session - session2 = client2.transport.get_app_profile._session - assert session1 != session2 - session1 = client1.transport.list_app_profiles._session - session2 = client2.transport.list_app_profiles._session - assert session1 != session2 - session1 = client1.transport.update_app_profile._session - session2 = client2.transport.update_app_profile._session - assert session1 != session2 - session1 = client1.transport.delete_app_profile._session - session2 = client2.transport.delete_app_profile._session - assert session1 != session2 - session1 = client1.transport.get_iam_policy._session - session2 = client2.transport.get_iam_policy._session - assert session1 != session2 - session1 = client1.transport.set_iam_policy._session - session2 = client2.transport.set_iam_policy._session - assert session1 != session2 - session1 = client1.transport.test_iam_permissions._session - session2 = client2.transport.test_iam_permissions._session - assert session1 != session2 - session1 = client1.transport.list_hot_tablets._session - session2 = client2.transport.list_hot_tablets._session - assert session1 != session2 - session1 = client1.transport.create_logical_view._session - session2 = client2.transport.create_logical_view._session - assert session1 != session2 - session1 = client1.transport.get_logical_view._session - session2 = client2.transport.get_logical_view._session - assert session1 != session2 - session1 = client1.transport.list_logical_views._session - session2 = client2.transport.list_logical_views._session - assert session1 != session2 - session1 = client1.transport.update_logical_view._session - session2 = client2.transport.update_logical_view._session - assert session1 != session2 - session1 = client1.transport.delete_logical_view._session - session2 = client2.transport.delete_logical_view._session - assert session1 != session2 - session1 = client1.transport.create_materialized_view._session - session2 = client2.transport.create_materialized_view._session - assert session1 != session2 - session1 = client1.transport.get_materialized_view._session - session2 = client2.transport.get_materialized_view._session - assert session1 != session2 - session1 = client1.transport.list_materialized_views._session - session2 = client2.transport.list_materialized_views._session - assert session1 != session2 - session1 = client1.transport.update_materialized_view._session - session2 = client2.transport.update_materialized_view._session - assert session1 != session2 - session1 = client1.transport.delete_materialized_view._session - session2 = client2.transport.delete_materialized_view._session - assert session1 != session2 - - -def test_bigtable_instance_admin_grpc_transport_channel(): - 
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.BigtableInstanceAdminGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_bigtable_instance_admin_grpc_asyncio_transport_channel(): - channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.BigtableInstanceAdminGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize( - "transport_class", - [ - transports.BigtableInstanceAdminGrpcTransport, - transports.BigtableInstanceAdminGrpcAsyncIOTransport, - ], -) -def test_bigtable_instance_admin_transport_channel_mtls_with_client_cert_source( - transport_class, -): - with mock.patch( - "grpc.ssl_channel_credentials", autospec=True - ) as grpc_ssl_channel_cred: - with mock.patch.object( - transport_class, "create_channel" - ) as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, "default") as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. 
-@pytest.mark.parametrize( - "transport_class", - [ - transports.BigtableInstanceAdminGrpcTransport, - transports.BigtableInstanceAdminGrpcAsyncIOTransport, - ], -) -def test_bigtable_instance_admin_transport_channel_mtls_with_adc(transport_class): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object( - transport_class, "create_channel" - ) as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_bigtable_instance_admin_grpc_lro_client(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_bigtable_instance_admin_grpc_lro_async_client(): - client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_app_profile_path(): - project = "squid" - instance = "clam" - app_profile = "whelk" - expected = ( - "projects/{project}/instances/{instance}/appProfiles/{app_profile}".format( - project=project, - instance=instance, - app_profile=app_profile, - ) - ) - actual = BigtableInstanceAdminClient.app_profile_path( - project, instance, app_profile - ) - assert expected == actual - - -def test_parse_app_profile_path(): - expected = { - "project": "octopus", - "instance": "oyster", - "app_profile": "nudibranch", - } - path = BigtableInstanceAdminClient.app_profile_path(**expected) - - # Check that the path construction is reversible. 
- actual = BigtableInstanceAdminClient.parse_app_profile_path(path) - assert expected == actual - - -def test_cluster_path(): - project = "cuttlefish" - instance = "mussel" - cluster = "winkle" - expected = "projects/{project}/instances/{instance}/clusters/{cluster}".format( - project=project, - instance=instance, - cluster=cluster, - ) - actual = BigtableInstanceAdminClient.cluster_path(project, instance, cluster) - assert expected == actual - - -def test_parse_cluster_path(): - expected = { - "project": "nautilus", - "instance": "scallop", - "cluster": "abalone", - } - path = BigtableInstanceAdminClient.cluster_path(**expected) - - # Check that the path construction is reversible. - actual = BigtableInstanceAdminClient.parse_cluster_path(path) - assert expected == actual - - -def test_crypto_key_path(): - project = "squid" - location = "clam" - key_ring = "whelk" - crypto_key = "octopus" - expected = "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}".format( - project=project, - location=location, - key_ring=key_ring, - crypto_key=crypto_key, - ) - actual = BigtableInstanceAdminClient.crypto_key_path( - project, location, key_ring, crypto_key - ) - assert expected == actual - - -def test_parse_crypto_key_path(): - expected = { - "project": "oyster", - "location": "nudibranch", - "key_ring": "cuttlefish", - "crypto_key": "mussel", - } - path = BigtableInstanceAdminClient.crypto_key_path(**expected) - - # Check that the path construction is reversible. - actual = BigtableInstanceAdminClient.parse_crypto_key_path(path) - assert expected == actual - - -def test_hot_tablet_path(): - project = "winkle" - instance = "nautilus" - cluster = "scallop" - hot_tablet = "abalone" - expected = "projects/{project}/instances/{instance}/clusters/{cluster}/hotTablets/{hot_tablet}".format( - project=project, - instance=instance, - cluster=cluster, - hot_tablet=hot_tablet, - ) - actual = BigtableInstanceAdminClient.hot_tablet_path( - project, instance, cluster, hot_tablet - ) - assert expected == actual - - -def test_parse_hot_tablet_path(): - expected = { - "project": "squid", - "instance": "clam", - "cluster": "whelk", - "hot_tablet": "octopus", - } - path = BigtableInstanceAdminClient.hot_tablet_path(**expected) - - # Check that the path construction is reversible. - actual = BigtableInstanceAdminClient.parse_hot_tablet_path(path) - assert expected == actual - - -def test_instance_path(): - project = "oyster" - instance = "nudibranch" - expected = "projects/{project}/instances/{instance}".format( - project=project, - instance=instance, - ) - actual = BigtableInstanceAdminClient.instance_path(project, instance) - assert expected == actual - - -def test_parse_instance_path(): - expected = { - "project": "cuttlefish", - "instance": "mussel", - } - path = BigtableInstanceAdminClient.instance_path(**expected) - - # Check that the path construction is reversible. 
- actual = BigtableInstanceAdminClient.parse_instance_path(path) - assert expected == actual - - -def test_logical_view_path(): - project = "winkle" - instance = "nautilus" - logical_view = "scallop" - expected = ( - "projects/{project}/instances/{instance}/logicalViews/{logical_view}".format( - project=project, - instance=instance, - logical_view=logical_view, - ) - ) - actual = BigtableInstanceAdminClient.logical_view_path( - project, instance, logical_view - ) - assert expected == actual - - -def test_parse_logical_view_path(): - expected = { - "project": "abalone", - "instance": "squid", - "logical_view": "clam", - } - path = BigtableInstanceAdminClient.logical_view_path(**expected) - - # Check that the path construction is reversible. - actual = BigtableInstanceAdminClient.parse_logical_view_path(path) - assert expected == actual - - -def test_materialized_view_path(): - project = "whelk" - instance = "octopus" - materialized_view = "oyster" - expected = "projects/{project}/instances/{instance}/materializedViews/{materialized_view}".format( - project=project, - instance=instance, - materialized_view=materialized_view, - ) - actual = BigtableInstanceAdminClient.materialized_view_path( - project, instance, materialized_view - ) - assert expected == actual - - -def test_parse_materialized_view_path(): - expected = { - "project": "nudibranch", - "instance": "cuttlefish", - "materialized_view": "mussel", - } - path = BigtableInstanceAdminClient.materialized_view_path(**expected) - - # Check that the path construction is reversible. - actual = BigtableInstanceAdminClient.parse_materialized_view_path(path) - assert expected == actual - - -def test_table_path(): - project = "winkle" - instance = "nautilus" - table = "scallop" - expected = "projects/{project}/instances/{instance}/tables/{table}".format( - project=project, - instance=instance, - table=table, - ) - actual = BigtableInstanceAdminClient.table_path(project, instance, table) - assert expected == actual - - -def test_parse_table_path(): - expected = { - "project": "abalone", - "instance": "squid", - "table": "clam", - } - path = BigtableInstanceAdminClient.table_path(**expected) - - # Check that the path construction is reversible. - actual = BigtableInstanceAdminClient.parse_table_path(path) - assert expected == actual - - -def test_common_billing_account_path(): - billing_account = "whelk" - expected = "billingAccounts/{billing_account}".format( - billing_account=billing_account, - ) - actual = BigtableInstanceAdminClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "octopus", - } - path = BigtableInstanceAdminClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = BigtableInstanceAdminClient.parse_common_billing_account_path(path) - assert expected == actual - - -def test_common_folder_path(): - folder = "oyster" - expected = "folders/{folder}".format( - folder=folder, - ) - actual = BigtableInstanceAdminClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "nudibranch", - } - path = BigtableInstanceAdminClient.common_folder_path(**expected) - - # Check that the path construction is reversible. 
- actual = BigtableInstanceAdminClient.parse_common_folder_path(path) - assert expected == actual - - -def test_common_organization_path(): - organization = "cuttlefish" - expected = "organizations/{organization}".format( - organization=organization, - ) - actual = BigtableInstanceAdminClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "mussel", - } - path = BigtableInstanceAdminClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = BigtableInstanceAdminClient.parse_common_organization_path(path) - assert expected == actual - - -def test_common_project_path(): - project = "winkle" - expected = "projects/{project}".format( - project=project, - ) - actual = BigtableInstanceAdminClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "nautilus", - } - path = BigtableInstanceAdminClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = BigtableInstanceAdminClient.parse_common_project_path(path) - assert expected == actual - - -def test_common_location_path(): - project = "scallop" - location = "abalone" - expected = "projects/{project}/locations/{location}".format( - project=project, - location=location, - ) - actual = BigtableInstanceAdminClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "squid", - "location": "clam", - } - path = BigtableInstanceAdminClient.common_location_path(**expected) - - # Check that the path construction is reversible. - actual = BigtableInstanceAdminClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_with_default_client_info(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object( - transports.BigtableInstanceAdminTransport, "_prep_wrapped_messages" - ) as prep: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object( - transports.BigtableInstanceAdminTransport, "_prep_wrapped_messages" - ) as prep: - transport_class = BigtableInstanceAdminClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - -def test_transport_close_grpc(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc" - ) - with mock.patch.object( - type(getattr(client.transport, "_grpc_channel")), "close" - ) as close: - with client: - close.assert_not_called() - close.assert_called_once() - - -@pytest.mark.asyncio -async def test_transport_close_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), transport="grpc_asyncio" - ) - with mock.patch.object( - type(getattr(client.transport, "_grpc_channel")), "close" - ) as close: - async with client: - close.assert_not_called() - close.assert_called_once() - - -def test_transport_close_rest(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - with mock.patch.object( - type(getattr(client.transport, "_session")), "close" - ) as close: - with client: - close.assert_not_called() - 
close.assert_called_once() - - -def test_client_ctx(): - transports = [ - "rest", - "grpc", - ] - for transport in transports: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport - ) - # Test client calls underlying transport. - with mock.patch.object(type(client.transport), "close") as close: - close.assert_not_called() - with client: - pass - close.assert_called() - - -@pytest.mark.parametrize( - "client_class,transport_class", - [ - (BigtableInstanceAdminClient, transports.BigtableInstanceAdminGrpcTransport), - ( - BigtableInstanceAdminAsyncClient, - transports.BigtableInstanceAdminGrpcAsyncIOTransport, - ), - ], -) -def test_api_key_credentials(client_class, transport_class): - with mock.patch.object( - google.auth._default, "get_api_key_credentials", create=True - ) as get_api_key_credentials: - mock_cred = mock.Mock() - get_api_key_credentials.return_value = mock_cred - options = client_options.ClientOptions() - options.api_key = "api_key" - with mock.patch.object(transport_class, "__init__") as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=mock_cred, - credentials_file=None, - host=client._DEFAULT_ENDPOINT_TEMPLATE.format( - UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE - ), - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) diff --git a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py deleted file mode 100644 index c73f650d9..000000000 --- a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ /dev/null @@ -1,29631 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os - -# try/except added for compatibility with python < 3.8 -try: - from unittest import mock - from unittest.mock import AsyncMock # pragma: NO COVER -except ImportError: # pragma: NO COVER - import mock - -import grpc -from grpc.experimental import aio -from collections.abc import Iterable, AsyncIterable -from google.protobuf import json_format -import json -import math -import pytest -from google.api_core import api_core_version -from proto.marshal.rules.dates import DurationRule, TimestampRule -from proto.marshal.rules import wrappers -from requests import Response -from requests import Request, PreparedRequest -from requests.sessions import Session -from google.protobuf import json_format - -try: - from google.auth.aio import credentials as ga_credentials_async - - HAS_GOOGLE_AUTH_AIO = True -except ImportError: # pragma: NO COVER - HAS_GOOGLE_AUTH_AIO = False - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.api_core import path_template -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - BaseBigtableTableAdminAsyncClient, -) -from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - BaseBigtableTableAdminClient, -) -from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import pagers -from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import transports -from google.cloud.bigtable_admin_v2.types import bigtable_table_admin -from google.cloud.bigtable_admin_v2.types import table -from google.cloud.bigtable_admin_v2.types import table as gba_table -from google.cloud.bigtable_admin_v2.types import types -from google.iam.v1 import iam_policy_pb2 # type: ignore -from google.iam.v1 import options_pb2 # type: ignore -from google.iam.v1 import policy_pb2 # type: ignore -from google.longrunning import operations_pb2 # type: ignore -from google.oauth2 import service_account -from google.protobuf import any_pb2 # type: ignore -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore -from google.type import expr_pb2 # type: ignore -import google.auth - - -CRED_INFO_JSON = { - "credential_source": "/path/to/file", - "credential_type": "service account credentials", - "principal": "service-account@example.com", -} -CRED_INFO_STRING = json.dumps(CRED_INFO_JSON) - - -async def mock_async_gen(data, chunk_size=1): - for i in range(0, len(data)): # pragma: NO COVER - chunk = data[i : i + chunk_size] - yield chunk.encode("utf-8") - - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# TODO: use async auth anon credentials by default once the minimum version of google-auth is upgraded. -# See related issue: https://github.com/googleapis/gapic-generator-python/issues/2107. 
-def async_anonymous_credentials(): - if HAS_GOOGLE_AUTH_AIO: - return ga_credentials_async.AnonymousCredentials() - return ga_credentials.AnonymousCredentials() - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. -def modify_default_endpoint(client): - return ( - "foo.googleapis.com" - if ("localhost" in client.DEFAULT_ENDPOINT) - else client.DEFAULT_ENDPOINT - ) - - -# If default endpoint template is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint template so the client can produce a different -# mtls endpoint for endpoint testing purposes. -def modify_default_endpoint_template(client): - return ( - "test.{UNIVERSE_DOMAIN}" - if ("localhost" in client._DEFAULT_ENDPOINT_TEMPLATE) - else client._DEFAULT_ENDPOINT_TEMPLATE - ) - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert BaseBigtableTableAdminClient._get_default_mtls_endpoint(None) is None - assert ( - BaseBigtableTableAdminClient._get_default_mtls_endpoint(api_endpoint) - == api_mtls_endpoint - ) - assert ( - BaseBigtableTableAdminClient._get_default_mtls_endpoint(api_mtls_endpoint) - == api_mtls_endpoint - ) - assert ( - BaseBigtableTableAdminClient._get_default_mtls_endpoint(sandbox_endpoint) - == sandbox_mtls_endpoint - ) - assert ( - BaseBigtableTableAdminClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) - == sandbox_mtls_endpoint - ) - assert ( - BaseBigtableTableAdminClient._get_default_mtls_endpoint(non_googleapi) - == non_googleapi - ) - - -def test__read_environment_variables(): - assert BaseBigtableTableAdminClient._read_environment_variables() == ( - False, - "auto", - None, - ) - - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): - assert BaseBigtableTableAdminClient._read_environment_variables() == ( - True, - "auto", - None, - ) - - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): - assert BaseBigtableTableAdminClient._read_environment_variables() == ( - False, - "auto", - None, - ) - - with mock.patch.dict( - os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} - ): - with pytest.raises(ValueError) as excinfo: - BaseBigtableTableAdminClient._read_environment_variables() - assert ( - str(excinfo.value) - == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) - - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - assert BaseBigtableTableAdminClient._read_environment_variables() == ( - False, - "never", - None, - ) - - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - assert BaseBigtableTableAdminClient._read_environment_variables() == ( - False, - "always", - None, - ) - - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}): - assert BaseBigtableTableAdminClient._read_environment_variables() == ( - False, - "auto", - None, - ) - - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError) as excinfo: - BaseBigtableTableAdminClient._read_environment_variables() - assert ( - 
str(excinfo.value) - == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" - ) - - with mock.patch.dict(os.environ, {"GOOGLE_CLOUD_UNIVERSE_DOMAIN": "foo.com"}): - assert BaseBigtableTableAdminClient._read_environment_variables() == ( - False, - "auto", - "foo.com", - ) - - -def test__get_client_cert_source(): - mock_provided_cert_source = mock.Mock() - mock_default_cert_source = mock.Mock() - - assert BaseBigtableTableAdminClient._get_client_cert_source(None, False) is None - assert ( - BaseBigtableTableAdminClient._get_client_cert_source( - mock_provided_cert_source, False - ) - is None - ) - assert ( - BaseBigtableTableAdminClient._get_client_cert_source( - mock_provided_cert_source, True - ) - == mock_provided_cert_source - ) - - with mock.patch( - "google.auth.transport.mtls.has_default_client_cert_source", return_value=True - ): - with mock.patch( - "google.auth.transport.mtls.default_client_cert_source", - return_value=mock_default_cert_source, - ): - assert ( - BaseBigtableTableAdminClient._get_client_cert_source(None, True) - is mock_default_cert_source - ) - assert ( - BaseBigtableTableAdminClient._get_client_cert_source( - mock_provided_cert_source, "true" - ) - is mock_provided_cert_source - ) - - -@mock.patch.object( - BaseBigtableTableAdminClient, - "_DEFAULT_ENDPOINT_TEMPLATE", - modify_default_endpoint_template(BaseBigtableTableAdminClient), -) -@mock.patch.object( - BaseBigtableTableAdminAsyncClient, - "_DEFAULT_ENDPOINT_TEMPLATE", - modify_default_endpoint_template(BaseBigtableTableAdminAsyncClient), -) -def test__get_api_endpoint(): - api_override = "foo.com" - mock_client_cert_source = mock.Mock() - default_universe = BaseBigtableTableAdminClient._DEFAULT_UNIVERSE - default_endpoint = BaseBigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( - UNIVERSE_DOMAIN=default_universe - ) - mock_universe = "bar.com" - mock_endpoint = BaseBigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( - UNIVERSE_DOMAIN=mock_universe - ) - - assert ( - BaseBigtableTableAdminClient._get_api_endpoint( - api_override, mock_client_cert_source, default_universe, "always" - ) - == api_override - ) - assert ( - BaseBigtableTableAdminClient._get_api_endpoint( - None, mock_client_cert_source, default_universe, "auto" - ) - == BaseBigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT - ) - assert ( - BaseBigtableTableAdminClient._get_api_endpoint( - None, None, default_universe, "auto" - ) - == default_endpoint - ) - assert ( - BaseBigtableTableAdminClient._get_api_endpoint( - None, None, default_universe, "always" - ) - == BaseBigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT - ) - assert ( - BaseBigtableTableAdminClient._get_api_endpoint( - None, mock_client_cert_source, default_universe, "always" - ) - == BaseBigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT - ) - assert ( - BaseBigtableTableAdminClient._get_api_endpoint( - None, None, mock_universe, "never" - ) - == mock_endpoint - ) - assert ( - BaseBigtableTableAdminClient._get_api_endpoint( - None, None, default_universe, "never" - ) - == default_endpoint - ) - - with pytest.raises(MutualTLSChannelError) as excinfo: - BaseBigtableTableAdminClient._get_api_endpoint( - None, mock_client_cert_source, mock_universe, "auto" - ) - assert ( - str(excinfo.value) - == "mTLS is not supported in any universe other than googleapis.com." 
- ) - - -def test__get_universe_domain(): - client_universe_domain = "foo.com" - universe_domain_env = "bar.com" - - assert ( - BaseBigtableTableAdminClient._get_universe_domain( - client_universe_domain, universe_domain_env - ) - == client_universe_domain - ) - assert ( - BaseBigtableTableAdminClient._get_universe_domain(None, universe_domain_env) - == universe_domain_env - ) - assert ( - BaseBigtableTableAdminClient._get_universe_domain(None, None) - == BaseBigtableTableAdminClient._DEFAULT_UNIVERSE - ) - - with pytest.raises(ValueError) as excinfo: - BaseBigtableTableAdminClient._get_universe_domain("", None) - assert str(excinfo.value) == "Universe Domain cannot be an empty string." - - -@pytest.mark.parametrize( - "error_code,cred_info_json,show_cred_info", - [ - (401, CRED_INFO_JSON, True), - (403, CRED_INFO_JSON, True), - (404, CRED_INFO_JSON, True), - (500, CRED_INFO_JSON, False), - (401, None, False), - (403, None, False), - (404, None, False), - (500, None, False), - ], -) -def test__add_cred_info_for_auth_errors(error_code, cred_info_json, show_cred_info): - cred = mock.Mock(["get_cred_info"]) - cred.get_cred_info = mock.Mock(return_value=cred_info_json) - client = BaseBigtableTableAdminClient(credentials=cred) - client._transport._credentials = cred - - error = core_exceptions.GoogleAPICallError("message", details=["foo"]) - error.code = error_code - - client._add_cred_info_for_auth_errors(error) - if show_cred_info: - assert error.details == ["foo", CRED_INFO_STRING] - else: - assert error.details == ["foo"] - - -@pytest.mark.parametrize("error_code", [401, 403, 404, 500]) -def test__add_cred_info_for_auth_errors_no_get_cred_info(error_code): - cred = mock.Mock([]) - assert not hasattr(cred, "get_cred_info") - client = BaseBigtableTableAdminClient(credentials=cred) - client._transport._credentials = cred - - error = core_exceptions.GoogleAPICallError("message", details=[]) - error.code = error_code - - client._add_cred_info_for_auth_errors(error) - assert error.details == [] - - -@pytest.mark.parametrize( - "client_class,transport_name", - [ - (BaseBigtableTableAdminClient, "grpc"), - (BaseBigtableTableAdminAsyncClient, "grpc_asyncio"), - (BaseBigtableTableAdminClient, "rest"), - ], -) -def test_base_bigtable_table_admin_client_from_service_account_info( - client_class, transport_name -): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object( - service_account.Credentials, "from_service_account_info" - ) as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info, transport=transport_name) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == ( - "bigtableadmin.googleapis.com:443" - if transport_name in ["grpc", "grpc_asyncio"] - else "https://bigtableadmin.googleapis.com" - ) - - -@pytest.mark.parametrize( - "transport_class,transport_name", - [ - (transports.BigtableTableAdminGrpcTransport, "grpc"), - (transports.BigtableTableAdminGrpcAsyncIOTransport, "grpc_asyncio"), - (transports.BigtableTableAdminRestTransport, "rest"), - ], -) -def test_base_bigtable_table_admin_client_service_account_always_use_jwt( - transport_class, transport_name -): - with mock.patch.object( - service_account.Credentials, "with_always_use_jwt_access", create=True - ) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - 
use_jwt.assert_called_once_with(True) - - with mock.patch.object( - service_account.Credentials, "with_always_use_jwt_access", create=True - ) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=False) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize( - "client_class,transport_name", - [ - (BaseBigtableTableAdminClient, "grpc"), - (BaseBigtableTableAdminAsyncClient, "grpc_asyncio"), - (BaseBigtableTableAdminClient, "rest"), - ], -) -def test_base_bigtable_table_admin_client_from_service_account_file( - client_class, transport_name -): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object( - service_account.Credentials, "from_service_account_file" - ) as factory: - factory.return_value = creds - client = client_class.from_service_account_file( - "dummy/file/path.json", transport=transport_name - ) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json( - "dummy/file/path.json", transport=transport_name - ) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == ( - "bigtableadmin.googleapis.com:443" - if transport_name in ["grpc", "grpc_asyncio"] - else "https://bigtableadmin.googleapis.com" - ) - - -def test_base_bigtable_table_admin_client_get_transport_class(): - transport = BaseBigtableTableAdminClient.get_transport_class() - available_transports = [ - transports.BigtableTableAdminGrpcTransport, - transports.BigtableTableAdminRestTransport, - ] - assert transport in available_transports - - transport = BaseBigtableTableAdminClient.get_transport_class("grpc") - assert transport == transports.BigtableTableAdminGrpcTransport - - -@pytest.mark.parametrize( - "client_class,transport_class,transport_name", - [ - ( - BaseBigtableTableAdminClient, - transports.BigtableTableAdminGrpcTransport, - "grpc", - ), - ( - BaseBigtableTableAdminAsyncClient, - transports.BigtableTableAdminGrpcAsyncIOTransport, - "grpc_asyncio", - ), - ( - BaseBigtableTableAdminClient, - transports.BigtableTableAdminRestTransport, - "rest", - ), - ], -) -@mock.patch.object( - BaseBigtableTableAdminClient, - "_DEFAULT_ENDPOINT_TEMPLATE", - modify_default_endpoint_template(BaseBigtableTableAdminClient), -) -@mock.patch.object( - BaseBigtableTableAdminAsyncClient, - "_DEFAULT_ENDPOINT_TEMPLATE", - modify_default_endpoint_template(BaseBigtableTableAdminAsyncClient), -) -def test_base_bigtable_table_admin_client_client_options( - client_class, transport_class, transport_name -): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(BaseBigtableTableAdminClient, "get_transport_class") as gtc: - transport = transport_class(credentials=ga_credentials.AnonymousCredentials()) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(BaseBigtableTableAdminClient, "get_transport_class") as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. 
- options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, "__init__") as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, "__init__") as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client._DEFAULT_ENDPOINT_TEMPLATE.format( - UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE - ), - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, "__init__") as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError) as excinfo: - client = client_class(transport=transport_name) - assert ( - str(excinfo.value) - == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" - ) - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
- with mock.patch.dict( - os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} - ): - with pytest.raises(ValueError) as excinfo: - client = client_class(transport=transport_name) - assert ( - str(excinfo.value) - == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, "__init__") as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client._DEFAULT_ENDPOINT_TEMPLATE.format( - UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE - ), - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - # Check the case api_endpoint is provided - options = client_options.ClientOptions( - api_audience="https://language.googleapis.com" - ) - with mock.patch.object(transport_class, "__init__") as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client._DEFAULT_ENDPOINT_TEMPLATE.format( - UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE - ), - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience="https://language.googleapis.com", - ) - - -@pytest.mark.parametrize( - "client_class,transport_class,transport_name,use_client_cert_env", - [ - ( - BaseBigtableTableAdminClient, - transports.BigtableTableAdminGrpcTransport, - "grpc", - "true", - ), - ( - BaseBigtableTableAdminAsyncClient, - transports.BigtableTableAdminGrpcAsyncIOTransport, - "grpc_asyncio", - "true", - ), - ( - BaseBigtableTableAdminClient, - transports.BigtableTableAdminGrpcTransport, - "grpc", - "false", - ), - ( - BaseBigtableTableAdminAsyncClient, - transports.BigtableTableAdminGrpcAsyncIOTransport, - "grpc_asyncio", - "false", - ), - ( - BaseBigtableTableAdminClient, - transports.BigtableTableAdminRestTransport, - "rest", - "true", - ), - ( - BaseBigtableTableAdminClient, - transports.BigtableTableAdminRestTransport, - "rest", - "false", - ), - ], -) -@mock.patch.object( - BaseBigtableTableAdminClient, - "_DEFAULT_ENDPOINT_TEMPLATE", - modify_default_endpoint_template(BaseBigtableTableAdminClient), -) -@mock.patch.object( - BaseBigtableTableAdminAsyncClient, - "_DEFAULT_ENDPOINT_TEMPLATE", - modify_default_endpoint_template(BaseBigtableTableAdminAsyncClient), -) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_base_bigtable_table_admin_client_mtls_env_auto( - client_class, transport_class, transport_name, use_client_cert_env -): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict( - os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} - ): - options = client_options.ClientOptions( - client_cert_source=client_cert_source_callback - ) - with mock.patch.object(transport_class, "__init__") as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format( - UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE - ) - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict( - os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} - ): - with mock.patch.object(transport_class, "__init__") as patched: - with mock.patch( - "google.auth.transport.mtls.has_default_client_cert_source", - return_value=True, - ): - with mock.patch( - "google.auth.transport.mtls.default_client_cert_source", - return_value=client_cert_source_callback, - ): - if use_client_cert_env == "false": - expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format( - UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE - ) - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # Check the case client_cert_source and ADC client cert are not provided. 
- with mock.patch.dict( - os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} - ): - with mock.patch.object(transport_class, "__init__") as patched: - with mock.patch( - "google.auth.transport.mtls.has_default_client_cert_source", - return_value=False, - ): - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client._DEFAULT_ENDPOINT_TEMPLATE.format( - UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE - ), - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - -@pytest.mark.parametrize( - "client_class", [BaseBigtableTableAdminClient, BaseBigtableTableAdminAsyncClient] -) -@mock.patch.object( - BaseBigtableTableAdminClient, - "DEFAULT_ENDPOINT", - modify_default_endpoint(BaseBigtableTableAdminClient), -) -@mock.patch.object( - BaseBigtableTableAdminAsyncClient, - "DEFAULT_ENDPOINT", - modify_default_endpoint(BaseBigtableTableAdminAsyncClient), -) -def test_base_bigtable_table_admin_client_get_mtls_endpoint_and_cert_source( - client_class, -): - mock_client_cert_source = mock.Mock() - - # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): - mock_api_endpoint = "foo" - options = client_options.ClientOptions( - client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint - ) - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( - options - ) - assert api_endpoint == mock_api_endpoint - assert cert_source == mock_client_cert_source - - # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): - mock_client_cert_source = mock.Mock() - mock_api_endpoint = "foo" - options = client_options.ClientOptions( - client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint - ) - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( - options - ) - assert api_endpoint == mock_api_endpoint - assert cert_source is None - - # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() - assert api_endpoint == client_class.DEFAULT_ENDPOINT - assert cert_source is None - - # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() - assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT - assert cert_source is None - - # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): - with mock.patch( - "google.auth.transport.mtls.has_default_client_cert_source", - return_value=False, - ): - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() - assert api_endpoint == client_class.DEFAULT_ENDPOINT - assert cert_source is None - - # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): - with mock.patch( - "google.auth.transport.mtls.has_default_client_cert_source", - return_value=True, - ): - with mock.patch( - "google.auth.transport.mtls.default_client_cert_source", - return_value=mock_client_cert_source, - ): - ( - api_endpoint, - cert_source, - ) = client_class.get_mtls_endpoint_and_cert_source() - assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT - assert cert_source == mock_client_cert_source - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError) as excinfo: - client_class.get_mtls_endpoint_and_cert_source() - - assert ( - str(excinfo.value) - == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" - ) - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. - with mock.patch.dict( - os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} - ): - with pytest.raises(ValueError) as excinfo: - client_class.get_mtls_endpoint_and_cert_source() - - assert ( - str(excinfo.value) - == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) - - -@pytest.mark.parametrize( - "client_class", [BaseBigtableTableAdminClient, BaseBigtableTableAdminAsyncClient] -) -@mock.patch.object( - BaseBigtableTableAdminClient, - "_DEFAULT_ENDPOINT_TEMPLATE", - modify_default_endpoint_template(BaseBigtableTableAdminClient), -) -@mock.patch.object( - BaseBigtableTableAdminAsyncClient, - "_DEFAULT_ENDPOINT_TEMPLATE", - modify_default_endpoint_template(BaseBigtableTableAdminAsyncClient), -) -def test_base_bigtable_table_admin_client_client_api_endpoint(client_class): - mock_client_cert_source = client_cert_source_callback - api_override = "foo.com" - default_universe = BaseBigtableTableAdminClient._DEFAULT_UNIVERSE - default_endpoint = BaseBigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( - UNIVERSE_DOMAIN=default_universe - ) - mock_universe = "bar.com" - mock_endpoint = BaseBigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( - UNIVERSE_DOMAIN=mock_universe - ) - - # If ClientOptions.api_endpoint is set and GOOGLE_API_USE_CLIENT_CERTIFICATE="true", - # use ClientOptions.api_endpoint as the api endpoint regardless. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): - with mock.patch( - "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" - ): - options = client_options.ClientOptions( - client_cert_source=mock_client_cert_source, api_endpoint=api_override - ) - client = client_class( - client_options=options, - credentials=ga_credentials.AnonymousCredentials(), - ) - assert client.api_endpoint == api_override - - # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="never", - # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - client = client_class(credentials=ga_credentials.AnonymousCredentials()) - assert client.api_endpoint == default_endpoint - - # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="always", - # use the DEFAULT_MTLS_ENDPOINT as the api endpoint. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - client = client_class(credentials=ga_credentials.AnonymousCredentials()) - assert client.api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT - - # If ClientOptions.api_endpoint is not set, GOOGLE_API_USE_MTLS_ENDPOINT="auto" (default), - # GOOGLE_API_USE_CLIENT_CERTIFICATE="false" (default), default cert source doesn't exist, - # and ClientOptions.universe_domain="bar.com", - # use the _DEFAULT_ENDPOINT_TEMPLATE populated with universe domain as the api endpoint. - options = client_options.ClientOptions() - universe_exists = hasattr(options, "universe_domain") - if universe_exists: - options = client_options.ClientOptions(universe_domain=mock_universe) - client = client_class( - client_options=options, credentials=ga_credentials.AnonymousCredentials() - ) - else: - client = client_class( - client_options=options, credentials=ga_credentials.AnonymousCredentials() - ) - assert client.api_endpoint == ( - mock_endpoint if universe_exists else default_endpoint - ) - assert client.universe_domain == ( - mock_universe if universe_exists else default_universe - ) - - # If ClientOptions does not have a universe domain attribute and GOOGLE_API_USE_MTLS_ENDPOINT="never", - # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. - options = client_options.ClientOptions() - if hasattr(options, "universe_domain"): - delattr(options, "universe_domain") - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - client = client_class( - client_options=options, credentials=ga_credentials.AnonymousCredentials() - ) - assert client.api_endpoint == default_endpoint - - -@pytest.mark.parametrize( - "client_class,transport_class,transport_name", - [ - ( - BaseBigtableTableAdminClient, - transports.BigtableTableAdminGrpcTransport, - "grpc", - ), - ( - BaseBigtableTableAdminAsyncClient, - transports.BigtableTableAdminGrpcAsyncIOTransport, - "grpc_asyncio", - ), - ( - BaseBigtableTableAdminClient, - transports.BigtableTableAdminRestTransport, - "rest", - ), - ], -) -def test_base_bigtable_table_admin_client_client_options_scopes( - client_class, transport_class, transport_name -): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, "__init__") as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client._DEFAULT_ENDPOINT_TEMPLATE.format( - UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE - ), - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - -@pytest.mark.parametrize( - "client_class,transport_class,transport_name,grpc_helpers", - [ - ( - BaseBigtableTableAdminClient, - transports.BigtableTableAdminGrpcTransport, - "grpc", - grpc_helpers, - ), - ( - BaseBigtableTableAdminAsyncClient, - transports.BigtableTableAdminGrpcAsyncIOTransport, - "grpc_asyncio", - grpc_helpers_async, - ), - ( - BaseBigtableTableAdminClient, - transports.BigtableTableAdminRestTransport, - "rest", - None, - ), - ], -) -def test_base_bigtable_table_admin_client_client_options_credentials_file( - client_class, transport_class, transport_name, grpc_helpers -): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions(credentials_file="credentials.json") - - with mock.patch.object(transport_class, "__init__") as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client._DEFAULT_ENDPOINT_TEMPLATE.format( - UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE - ), - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - -def test_base_bigtable_table_admin_client_client_options_from_dict(): - with mock.patch( - "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.BigtableTableAdminGrpcTransport.__init__" - ) as grpc_transport: - grpc_transport.return_value = None - client = BaseBigtableTableAdminClient( - client_options={"api_endpoint": "squid.clam.whelk"} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - -@pytest.mark.parametrize( - "client_class,transport_class,transport_name,grpc_helpers", - [ - ( - BaseBigtableTableAdminClient, - transports.BigtableTableAdminGrpcTransport, - "grpc", - grpc_helpers, - ), - ( - BaseBigtableTableAdminAsyncClient, - transports.BigtableTableAdminGrpcAsyncIOTransport, - "grpc_asyncio", - grpc_helpers_async, - ), - ], -) -def test_base_bigtable_table_admin_client_create_channel_credentials_file( - client_class, transport_class, transport_name, grpc_helpers -): - # Check the case credentials file is provided. - options = client_options.ClientOptions(credentials_file="credentials.json") - - with mock.patch.object(transport_class, "__init__") as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client._DEFAULT_ENDPOINT_TEMPLATE.format( - UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE - ), - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # test that the credentials from file are saved and used as the credentials. 
- with mock.patch.object( - google.auth, "load_credentials_from_file", autospec=True - ) as load_creds, mock.patch.object( - google.auth, "default", autospec=True - ) as adc, mock.patch.object( - grpc_helpers, "create_channel" - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - file_creds = ga_credentials.AnonymousCredentials() - load_creds.return_value = (file_creds, None) - adc.return_value = (creds, None) - client = client_class(client_options=options, transport=transport_name) - create_channel.assert_called_with( - "bigtableadmin.googleapis.com:443", - credentials=file_creds, - credentials_file=None, - quota_project_id=None, - default_scopes=( - "https://www.googleapis.com/auth/bigtable.admin", - "https://www.googleapis.com/auth/bigtable.admin.table", - "https://www.googleapis.com/auth/cloud-bigtable.admin", - "https://www.googleapis.com/auth/cloud-bigtable.admin.table", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - ), - scopes=None, - default_host="bigtableadmin.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.CreateTableRequest, - dict, - ], -) -def test_create_table(request_type, transport: str = "grpc"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.create_table), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = gba_table.Table( - name="name_value", - granularity=gba_table.Table.TimestampGranularity.MILLIS, - deletion_protection=True, - ) - response = client.create_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.CreateTableRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, gba_table.Table) - assert response.name == "name_value" - assert response.granularity == gba_table.Table.TimestampGranularity.MILLIS - assert response.deletion_protection is True - - -def test_create_table_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_table_admin.CreateTableRequest( - parent="parent_value", - table_id="table_id_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.create_table), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. 
- ) - client.create_table(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CreateTableRequest( - parent="parent_value", - table_id="table_id_value", - ) - - -def test_create_table_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.create_table in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.create_table] = mock_rpc - request = {} - client.create_table(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.create_table(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_create_table_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.create_table - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.create_table - ] = mock_rpc - - request = {} - await client.create_table(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - await client.create_table(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_create_table_async( - transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.CreateTableRequest, -): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.create_table), "__call__") as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - gba_table.Table( - name="name_value", - granularity=gba_table.Table.TimestampGranularity.MILLIS, - deletion_protection=True, - ) - ) - response = await client.create_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.CreateTableRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, gba_table.Table) - assert response.name == "name_value" - assert response.granularity == gba_table.Table.TimestampGranularity.MILLIS - assert response.deletion_protection is True - - -@pytest.mark.asyncio -async def test_create_table_async_from_dict(): - await test_create_table_async(request_type=dict) - - -def test_create_table_field_headers(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.CreateTableRequest() - - request.parent = "parent_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.create_table), "__call__") as call: - call.return_value = gba_table.Table() - client.create_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_create_table_field_headers_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.CreateTableRequest() - - request.parent = "parent_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.create_table), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gba_table.Table()) - await client.create_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] - - -def test_create_table_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.create_table), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = gba_table.Table() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_table( - parent="parent_value", - table_id="table_id_value", - table=gba_table.Table(name="name_value"), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - arg = args[0].table_id - mock_val = "table_id_value" - assert arg == mock_val - arg = args[0].table - mock_val = gba_table.Table(name="name_value") - assert arg == mock_val - - -def test_create_table_flattened_error(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_table( - bigtable_table_admin.CreateTableRequest(), - parent="parent_value", - table_id="table_id_value", - table=gba_table.Table(name="name_value"), - ) - - -@pytest.mark.asyncio -async def test_create_table_flattened_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.create_table), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = gba_table.Table() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gba_table.Table()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_table( - parent="parent_value", - table_id="table_id_value", - table=gba_table.Table(name="name_value"), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - arg = args[0].table_id - mock_val = "table_id_value" - assert arg == mock_val - arg = args[0].table - mock_val = gba_table.Table(name="name_value") - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_table_flattened_error_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_table( - bigtable_table_admin.CreateTableRequest(), - parent="parent_value", - table_id="table_id_value", - table=gba_table.Table(name="name_value"), - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.CreateTableFromSnapshotRequest, - dict, - ], -) -def test_create_table_from_snapshot(request_type, transport: str = "grpc"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_table_from_snapshot), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/spam") - response = client.create_table_from_snapshot(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.CreateTableFromSnapshotRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_create_table_from_snapshot_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_table_admin.CreateTableFromSnapshotRequest( - parent="parent_value", - table_id="table_id_value", - source_snapshot="source_snapshot_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_table_from_snapshot), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.create_table_from_snapshot(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CreateTableFromSnapshotRequest( - parent="parent_value", - table_id="table_id_value", - source_snapshot="source_snapshot_value", - ) - - -def test_create_table_from_snapshot_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.create_table_from_snapshot - in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.create_table_from_snapshot - ] = mock_rpc - request = {} - client.create_table_from_snapshot(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods call wrapper_fn to build a cached - # client._transport.operations_client instance on first rpc call. 
- # Subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.create_table_from_snapshot(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_create_table_from_snapshot_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.create_table_from_snapshot - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.create_table_from_snapshot - ] = mock_rpc - - request = {} - await client.create_table_from_snapshot(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods call wrapper_fn to build a cached - # client._transport.operations_client instance on first rpc call. - # Subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - await client.create_table_from_snapshot(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_create_table_from_snapshot_async( - transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.CreateTableFromSnapshotRequest, -): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_table_from_snapshot), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - response = await client.create_table_from_snapshot(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.CreateTableFromSnapshotRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_table_from_snapshot_async_from_dict(): - await test_create_table_from_snapshot_async(request_type=dict) - - -def test_create_table_from_snapshot_field_headers(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = bigtable_table_admin.CreateTableFromSnapshotRequest() - - request.parent = "parent_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_table_from_snapshot), "__call__" - ) as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.create_table_from_snapshot(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_create_table_from_snapshot_field_headers_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.CreateTableFromSnapshotRequest() - - request.parent = "parent_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_table_from_snapshot), "__call__" - ) as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/op") - ) - await client.create_table_from_snapshot(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] - - -def test_create_table_from_snapshot_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_table_from_snapshot), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/op") - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_table_from_snapshot( - parent="parent_value", - table_id="table_id_value", - source_snapshot="source_snapshot_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - arg = args[0].table_id - mock_val = "table_id_value" - assert arg == mock_val - arg = args[0].source_snapshot - mock_val = "source_snapshot_value" - assert arg == mock_val - - -def test_create_table_from_snapshot_flattened_error(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.create_table_from_snapshot( - bigtable_table_admin.CreateTableFromSnapshotRequest(), - parent="parent_value", - table_id="table_id_value", - source_snapshot="source_snapshot_value", - ) - - -@pytest.mark.asyncio -async def test_create_table_from_snapshot_flattened_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_table_from_snapshot), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/op") - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_table_from_snapshot( - parent="parent_value", - table_id="table_id_value", - source_snapshot="source_snapshot_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - arg = args[0].table_id - mock_val = "table_id_value" - assert arg == mock_val - arg = args[0].source_snapshot - mock_val = "source_snapshot_value" - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_table_from_snapshot_flattened_error_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_table_from_snapshot( - bigtable_table_admin.CreateTableFromSnapshotRequest(), - parent="parent_value", - table_id="table_id_value", - source_snapshot="source_snapshot_value", - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.ListTablesRequest, - dict, - ], -) -def test_list_tables(request_type, transport: str = "grpc"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_tables), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.ListTablesResponse( - next_page_token="next_page_token_value", - ) - response = client.list_tables(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.ListTablesRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListTablesPager) - assert response.next_page_token == "next_page_token_value" - - -def test_list_tables_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. 
- client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_table_admin.ListTablesRequest( - parent="parent_value", - page_token="page_token_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_tables), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.list_tables(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListTablesRequest( - parent="parent_value", - page_token="page_token_value", - ) - - -def test_list_tables_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.list_tables in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.list_tables] = mock_rpc - request = {} - client.list_tables(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.list_tables(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_list_tables_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.list_tables - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.list_tables - ] = mock_rpc - - request = {} - await client.list_tables(request) - - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 - - await client.list_tables(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_list_tables_async( - transport: str = "grpc_asyncio", request_type=bigtable_table_admin.ListTablesRequest -): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_tables), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.ListTablesResponse( - next_page_token="next_page_token_value", - ) - ) - response = await client.list_tables(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.ListTablesRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListTablesAsyncPager) - assert response.next_page_token == "next_page_token_value" - - -@pytest.mark.asyncio -async def test_list_tables_async_from_dict(): - await test_list_tables_async(request_type=dict) - - -def test_list_tables_field_headers(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.ListTablesRequest() - - request.parent = "parent_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_tables), "__call__") as call: - call.return_value = bigtable_table_admin.ListTablesResponse() - client.list_tables(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_list_tables_field_headers_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.ListTablesRequest() - - request.parent = "parent_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_tables), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.ListTablesResponse() - ) - await client.list_tables(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] - - -def test_list_tables_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_tables), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.ListTablesResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_tables( - parent="parent_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - - -def test_list_tables_flattened_error(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_tables( - bigtable_table_admin.ListTablesRequest(), - parent="parent_value", - ) - - -@pytest.mark.asyncio -async def test_list_tables_flattened_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_tables), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.ListTablesResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.ListTablesResponse() - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_tables( - parent="parent_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_tables_flattened_error_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_tables( - bigtable_table_admin.ListTablesRequest(), - parent="parent_value", - ) - - -def test_list_tables_pager(transport_name: str = "grpc"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport_name, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_tables), "__call__") as call: - # Set the response to a series of pages. 
- call.side_effect = ( - bigtable_table_admin.ListTablesResponse( - tables=[ - table.Table(), - table.Table(), - table.Table(), - ], - next_page_token="abc", - ), - bigtable_table_admin.ListTablesResponse( - tables=[], - next_page_token="def", - ), - bigtable_table_admin.ListTablesResponse( - tables=[ - table.Table(), - ], - next_page_token="ghi", - ), - bigtable_table_admin.ListTablesResponse( - tables=[ - table.Table(), - table.Table(), - ], - ), - RuntimeError, - ) - - expected_metadata = () - retry = retries.Retry() - timeout = 5 - expected_metadata = tuple(expected_metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), - ) - pager = client.list_tables(request={}, retry=retry, timeout=timeout) - - assert pager._metadata == expected_metadata - assert pager._retry == retry - assert pager._timeout == timeout - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, table.Table) for i in results) - - -def test_list_tables_pages(transport_name: str = "grpc"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport_name, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_tables), "__call__") as call: - # Set the response to a series of pages. - call.side_effect = ( - bigtable_table_admin.ListTablesResponse( - tables=[ - table.Table(), - table.Table(), - table.Table(), - ], - next_page_token="abc", - ), - bigtable_table_admin.ListTablesResponse( - tables=[], - next_page_token="def", - ), - bigtable_table_admin.ListTablesResponse( - tables=[ - table.Table(), - ], - next_page_token="ghi", - ), - bigtable_table_admin.ListTablesResponse( - tables=[ - table.Table(), - table.Table(), - ], - ), - RuntimeError, - ) - pages = list(client.list_tables(request={}).pages) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token - - -@pytest.mark.asyncio -async def test_list_tables_async_pager(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tables), "__call__", new_callable=mock.AsyncMock - ) as call: - # Set the response to a series of pages. - call.side_effect = ( - bigtable_table_admin.ListTablesResponse( - tables=[ - table.Table(), - table.Table(), - table.Table(), - ], - next_page_token="abc", - ), - bigtable_table_admin.ListTablesResponse( - tables=[], - next_page_token="def", - ), - bigtable_table_admin.ListTablesResponse( - tables=[ - table.Table(), - ], - next_page_token="ghi", - ), - bigtable_table_admin.ListTablesResponse( - tables=[ - table.Table(), - table.Table(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_tables( - request={}, - ) - assert async_pager.next_page_token == "abc" - responses = [] - async for response in async_pager: # pragma: no branch - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, table.Table) for i in responses) - - -@pytest.mark.asyncio -async def test_list_tables_async_pages(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tables), "__call__", new_callable=mock.AsyncMock - ) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - bigtable_table_admin.ListTablesResponse( - tables=[ - table.Table(), - table.Table(), - table.Table(), - ], - next_page_token="abc", - ), - bigtable_table_admin.ListTablesResponse( - tables=[], - next_page_token="def", - ), - bigtable_table_admin.ListTablesResponse( - tables=[ - table.Table(), - ], - next_page_token="ghi", - ), - bigtable_table_admin.ListTablesResponse( - tables=[ - table.Table(), - table.Table(), - ], - ), - RuntimeError, - ) - pages = [] - # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` - # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 - async for page_ in ( # pragma: no branch - await client.list_tables(request={}) - ).pages: - pages.append(page_) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.GetTableRequest, - dict, - ], -) -def test_get_table(request_type, transport: str = "grpc"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_table), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = table.Table( - name="name_value", - granularity=table.Table.TimestampGranularity.MILLIS, - deletion_protection=True, - ) - response = client.get_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.GetTableRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, table.Table) - assert response.name == "name_value" - assert response.granularity == table.Table.TimestampGranularity.MILLIS - assert response.deletion_protection is True - - -def test_get_table_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_table_admin.GetTableRequest( - name="name_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_table), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. 
- ) - client.get_table(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetTableRequest( - name="name_value", - ) - - -def test_get_table_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.get_table in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.get_table] = mock_rpc - request = {} - client.get_table(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.get_table(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_get_table_async_use_cached_wrapped_rpc(transport: str = "grpc_asyncio"): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.get_table - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.get_table - ] = mock_rpc - - request = {} - await client.get_table(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - await client.get_table(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_get_table_async( - transport: str = "grpc_asyncio", request_type=bigtable_table_admin.GetTableRequest -): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_table), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - table.Table( - name="name_value", - granularity=table.Table.TimestampGranularity.MILLIS, - deletion_protection=True, - ) - ) - response = await client.get_table(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.GetTableRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, table.Table) - assert response.name == "name_value" - assert response.granularity == table.Table.TimestampGranularity.MILLIS - assert response.deletion_protection is True - - -@pytest.mark.asyncio -async def test_get_table_async_from_dict(): - await test_get_table_async(request_type=dict) - - -def test_get_table_field_headers(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.GetTableRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_table), "__call__") as call: - call.return_value = table.Table() - client.get_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_get_table_field_headers_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.GetTableRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_table), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Table()) - await client.get_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -def test_get_table_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_table), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = table.Table() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_table( - name="name_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - - -def test_get_table_flattened_error(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.get_table( - bigtable_table_admin.GetTableRequest(), - name="name_value", - ) - - -@pytest.mark.asyncio -async def test_get_table_flattened_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_table), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = table.Table() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Table()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_table( - name="name_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_table_flattened_error_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_table( - bigtable_table_admin.GetTableRequest(), - name="name_value", - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.UpdateTableRequest, - dict, - ], -) -def test_update_table(request_type, transport: str = "grpc"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_table), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/spam") - response = client.update_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.UpdateTableRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_update_table_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_table_admin.UpdateTableRequest() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_table), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. 
- ) - client.update_table(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.UpdateTableRequest() - - -def test_update_table_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.update_table in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.update_table] = mock_rpc - request = {} - client.update_table(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods call wrapper_fn to build a cached - # client._transport.operations_client instance on first rpc call. - # Subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.update_table(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_update_table_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.update_table - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.update_table - ] = mock_rpc - - request = {} - await client.update_table(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods call wrapper_fn to build a cached - # client._transport.operations_client instance on first rpc call. - # Subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - await client.update_table(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_update_table_async( - transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.UpdateTableRequest, -): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(type(client.transport.update_table), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - response = await client.update_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.UpdateTableRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_update_table_async_from_dict(): - await test_update_table_async(request_type=dict) - - -def test_update_table_field_headers(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.UpdateTableRequest() - - request.table.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_table), "__call__") as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.update_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "table.name=name_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_update_table_field_headers_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.UpdateTableRequest() - - request.table.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_table), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/op") - ) - await client.update_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "table.name=name_value", - ) in kw["metadata"] - - -def test_update_table_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_table), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/op") - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_table( - table=gba_table.Table(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].table - mock_val = gba_table.Table(name="name_value") - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) - assert arg == mock_val - - -def test_update_table_flattened_error(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_table( - bigtable_table_admin.UpdateTableRequest(), - table=gba_table.Table(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - -@pytest.mark.asyncio -async def test_update_table_flattened_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_table), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/op") - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_table( - table=gba_table.Table(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].table - mock_val = gba_table.Table(name="name_value") - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_update_table_flattened_error_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.update_table( - bigtable_table_admin.UpdateTableRequest(), - table=gba_table.Table(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.DeleteTableRequest, - dict, - ], -) -def test_delete_table(request_type, transport: str = "grpc"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_table), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.delete_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.DeleteTableRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. 
- assert response is None - - -def test_delete_table_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_table_admin.DeleteTableRequest( - name="name_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_table), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.delete_table(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteTableRequest( - name="name_value", - ) - - -def test_delete_table_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.delete_table in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.delete_table] = mock_rpc - request = {} - client.delete_table(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.delete_table(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_delete_table_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.delete_table - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.delete_table - ] = mock_rpc - - request = {} - await client.delete_table(request) - - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 - - await client.delete_table(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_delete_table_async( - transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.DeleteTableRequest, -): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_table), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.delete_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.DeleteTableRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_delete_table_async_from_dict(): - await test_delete_table_async(request_type=dict) - - -def test_delete_table_field_headers(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.DeleteTableRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_table), "__call__") as call: - call.return_value = None - client.delete_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_delete_table_field_headers_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.DeleteTableRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_table), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -def test_delete_table_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(type(client.transport.delete_table), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_table( - name="name_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - - -def test_delete_table_flattened_error(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_table( - bigtable_table_admin.DeleteTableRequest(), - name="name_value", - ) - - -@pytest.mark.asyncio -async def test_delete_table_flattened_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_table), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = None - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_table( - name="name_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_table_flattened_error_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_table( - bigtable_table_admin.DeleteTableRequest(), - name="name_value", - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.UndeleteTableRequest, - dict, - ], -) -def test_undelete_table(request_type, transport: str = "grpc"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.undelete_table), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/spam") - response = client.undelete_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.UndeleteTableRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -def test_undelete_table_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_table_admin.UndeleteTableRequest( - name="name_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.undelete_table), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.undelete_table(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.UndeleteTableRequest( - name="name_value", - ) - - -def test_undelete_table_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.undelete_table in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.undelete_table] = mock_rpc - request = {} - client.undelete_table(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods call wrapper_fn to build a cached - # client._transport.operations_client instance on first rpc call. - # Subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.undelete_table(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_undelete_table_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.undelete_table - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.undelete_table - ] = mock_rpc - - request = {} - await client.undelete_table(request) - - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 - - # Operation methods call wrapper_fn to build a cached - # client._transport.operations_client instance on first rpc call. - # Subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - await client.undelete_table(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_undelete_table_async( - transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.UndeleteTableRequest, -): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.undelete_table), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - response = await client.undelete_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.UndeleteTableRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_undelete_table_async_from_dict(): - await test_undelete_table_async(request_type=dict) - - -def test_undelete_table_field_headers(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.UndeleteTableRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.undelete_table), "__call__") as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.undelete_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_undelete_table_field_headers_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.UndeleteTableRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.undelete_table), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/op") - ) - await client.undelete_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -def test_undelete_table_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.undelete_table), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/op") - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.undelete_table( - name="name_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - - -def test_undelete_table_flattened_error(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.undelete_table( - bigtable_table_admin.UndeleteTableRequest(), - name="name_value", - ) - - -@pytest.mark.asyncio -async def test_undelete_table_flattened_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.undelete_table), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/op") - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.undelete_table( - name="name_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_undelete_table_flattened_error_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.undelete_table( - bigtable_table_admin.UndeleteTableRequest(), - name="name_value", - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.CreateAuthorizedViewRequest, - dict, - ], -) -def test_create_authorized_view(request_type, transport: str = "grpc"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_authorized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name="operations/spam") - response = client.create_authorized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.CreateAuthorizedViewRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_create_authorized_view_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_table_admin.CreateAuthorizedViewRequest( - parent="parent_value", - authorized_view_id="authorized_view_id_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_authorized_view), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.create_authorized_view(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CreateAuthorizedViewRequest( - parent="parent_value", - authorized_view_id="authorized_view_id_value", - ) - - -def test_create_authorized_view_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.create_authorized_view - in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.create_authorized_view - ] = mock_rpc - request = {} - client.create_authorized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods call wrapper_fn to build a cached - # client._transport.operations_client instance on first rpc call. 
- # Subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.create_authorized_view(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_create_authorized_view_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.create_authorized_view - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.create_authorized_view - ] = mock_rpc - - request = {} - await client.create_authorized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods call wrapper_fn to build a cached - # client._transport.operations_client instance on first rpc call. - # Subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - await client.create_authorized_view(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_create_authorized_view_async( - transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.CreateAuthorizedViewRequest, -): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_authorized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - response = await client.create_authorized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.CreateAuthorizedViewRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_authorized_view_async_from_dict(): - await test_create_authorized_view_async(request_type=dict) - - -def test_create_authorized_view_field_headers(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.CreateAuthorizedViewRequest() - - request.parent = "parent_value" - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_authorized_view), "__call__" - ) as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.create_authorized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_create_authorized_view_field_headers_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.CreateAuthorizedViewRequest() - - request.parent = "parent_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_authorized_view), "__call__" - ) as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/op") - ) - await client.create_authorized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] - - -def test_create_authorized_view_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_authorized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/op") - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_authorized_view( - parent="parent_value", - authorized_view=table.AuthorizedView(name="name_value"), - authorized_view_id="authorized_view_id_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - arg = args[0].authorized_view - mock_val = table.AuthorizedView(name="name_value") - assert arg == mock_val - arg = args[0].authorized_view_id - mock_val = "authorized_view_id_value" - assert arg == mock_val - - -def test_create_authorized_view_flattened_error(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_authorized_view( - bigtable_table_admin.CreateAuthorizedViewRequest(), - parent="parent_value", - authorized_view=table.AuthorizedView(name="name_value"), - authorized_view_id="authorized_view_id_value", - ) - - -@pytest.mark.asyncio -async def test_create_authorized_view_flattened_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_authorized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/op") - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_authorized_view( - parent="parent_value", - authorized_view=table.AuthorizedView(name="name_value"), - authorized_view_id="authorized_view_id_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - arg = args[0].authorized_view - mock_val = table.AuthorizedView(name="name_value") - assert arg == mock_val - arg = args[0].authorized_view_id - mock_val = "authorized_view_id_value" - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_authorized_view_flattened_error_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_authorized_view( - bigtable_table_admin.CreateAuthorizedViewRequest(), - parent="parent_value", - authorized_view=table.AuthorizedView(name="name_value"), - authorized_view_id="authorized_view_id_value", - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.ListAuthorizedViewsRequest, - dict, - ], -) -def test_list_authorized_views(request_type, transport: str = "grpc"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_authorized_views), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.ListAuthorizedViewsResponse( - next_page_token="next_page_token_value", - ) - response = client.list_authorized_views(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.ListAuthorizedViewsRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListAuthorizedViewsPager) - assert response.next_page_token == "next_page_token_value" - - -def test_list_authorized_views_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. 
- request = bigtable_table_admin.ListAuthorizedViewsRequest( - parent="parent_value", - page_token="page_token_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_authorized_views), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.list_authorized_views(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListAuthorizedViewsRequest( - parent="parent_value", - page_token="page_token_value", - ) - - -def test_list_authorized_views_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.list_authorized_views - in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.list_authorized_views - ] = mock_rpc - request = {} - client.list_authorized_views(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.list_authorized_views(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_list_authorized_views_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.list_authorized_views - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.list_authorized_views - ] = mock_rpc - - request = {} - await client.list_authorized_views(request) - - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 - - await client.list_authorized_views(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_list_authorized_views_async( - transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.ListAuthorizedViewsRequest, -): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_authorized_views), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.ListAuthorizedViewsResponse( - next_page_token="next_page_token_value", - ) - ) - response = await client.list_authorized_views(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.ListAuthorizedViewsRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListAuthorizedViewsAsyncPager) - assert response.next_page_token == "next_page_token_value" - - -@pytest.mark.asyncio -async def test_list_authorized_views_async_from_dict(): - await test_list_authorized_views_async(request_type=dict) - - -def test_list_authorized_views_field_headers(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.ListAuthorizedViewsRequest() - - request.parent = "parent_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_authorized_views), "__call__" - ) as call: - call.return_value = bigtable_table_admin.ListAuthorizedViewsResponse() - client.list_authorized_views(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_list_authorized_views_field_headers_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.ListAuthorizedViewsRequest() - - request.parent = "parent_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_authorized_views), "__call__" - ) as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.ListAuthorizedViewsResponse() - ) - await client.list_authorized_views(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] - - -def test_list_authorized_views_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_authorized_views), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.ListAuthorizedViewsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_authorized_views( - parent="parent_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - - -def test_list_authorized_views_flattened_error(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_authorized_views( - bigtable_table_admin.ListAuthorizedViewsRequest(), - parent="parent_value", - ) - - -@pytest.mark.asyncio -async def test_list_authorized_views_flattened_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_authorized_views), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.ListAuthorizedViewsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.ListAuthorizedViewsResponse() - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_authorized_views( - parent="parent_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_authorized_views_flattened_error_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_authorized_views( - bigtable_table_admin.ListAuthorizedViewsRequest(), - parent="parent_value", - ) - - -def test_list_authorized_views_pager(transport_name: str = "grpc"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport_name, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_authorized_views), "__call__" - ) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - bigtable_table_admin.ListAuthorizedViewsResponse( - authorized_views=[ - table.AuthorizedView(), - table.AuthorizedView(), - table.AuthorizedView(), - ], - next_page_token="abc", - ), - bigtable_table_admin.ListAuthorizedViewsResponse( - authorized_views=[], - next_page_token="def", - ), - bigtable_table_admin.ListAuthorizedViewsResponse( - authorized_views=[ - table.AuthorizedView(), - ], - next_page_token="ghi", - ), - bigtable_table_admin.ListAuthorizedViewsResponse( - authorized_views=[ - table.AuthorizedView(), - table.AuthorizedView(), - ], - ), - RuntimeError, - ) - - expected_metadata = () - retry = retries.Retry() - timeout = 5 - expected_metadata = tuple(expected_metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), - ) - pager = client.list_authorized_views(request={}, retry=retry, timeout=timeout) - - assert pager._metadata == expected_metadata - assert pager._retry == retry - assert pager._timeout == timeout - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, table.AuthorizedView) for i in results) - - -def test_list_authorized_views_pages(transport_name: str = "grpc"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport_name, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_authorized_views), "__call__" - ) as call: - # Set the response to a series of pages. - call.side_effect = ( - bigtable_table_admin.ListAuthorizedViewsResponse( - authorized_views=[ - table.AuthorizedView(), - table.AuthorizedView(), - table.AuthorizedView(), - ], - next_page_token="abc", - ), - bigtable_table_admin.ListAuthorizedViewsResponse( - authorized_views=[], - next_page_token="def", - ), - bigtable_table_admin.ListAuthorizedViewsResponse( - authorized_views=[ - table.AuthorizedView(), - ], - next_page_token="ghi", - ), - bigtable_table_admin.ListAuthorizedViewsResponse( - authorized_views=[ - table.AuthorizedView(), - table.AuthorizedView(), - ], - ), - RuntimeError, - ) - pages = list(client.list_authorized_views(request={}).pages) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token - - -@pytest.mark.asyncio -async def test_list_authorized_views_async_pager(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_authorized_views), - "__call__", - new_callable=mock.AsyncMock, - ) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - bigtable_table_admin.ListAuthorizedViewsResponse( - authorized_views=[ - table.AuthorizedView(), - table.AuthorizedView(), - table.AuthorizedView(), - ], - next_page_token="abc", - ), - bigtable_table_admin.ListAuthorizedViewsResponse( - authorized_views=[], - next_page_token="def", - ), - bigtable_table_admin.ListAuthorizedViewsResponse( - authorized_views=[ - table.AuthorizedView(), - ], - next_page_token="ghi", - ), - bigtable_table_admin.ListAuthorizedViewsResponse( - authorized_views=[ - table.AuthorizedView(), - table.AuthorizedView(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_authorized_views( - request={}, - ) - assert async_pager.next_page_token == "abc" - responses = [] - async for response in async_pager: # pragma: no branch - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, table.AuthorizedView) for i in responses) - - -@pytest.mark.asyncio -async def test_list_authorized_views_async_pages(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_authorized_views), - "__call__", - new_callable=mock.AsyncMock, - ) as call: - # Set the response to a series of pages. - call.side_effect = ( - bigtable_table_admin.ListAuthorizedViewsResponse( - authorized_views=[ - table.AuthorizedView(), - table.AuthorizedView(), - table.AuthorizedView(), - ], - next_page_token="abc", - ), - bigtable_table_admin.ListAuthorizedViewsResponse( - authorized_views=[], - next_page_token="def", - ), - bigtable_table_admin.ListAuthorizedViewsResponse( - authorized_views=[ - table.AuthorizedView(), - ], - next_page_token="ghi", - ), - bigtable_table_admin.ListAuthorizedViewsResponse( - authorized_views=[ - table.AuthorizedView(), - table.AuthorizedView(), - ], - ), - RuntimeError, - ) - pages = [] - # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` - # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 - async for page_ in ( # pragma: no branch - await client.list_authorized_views(request={}) - ).pages: - pages.append(page_) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.GetAuthorizedViewRequest, - dict, - ], -) -def test_get_authorized_view(request_type, transport: str = "grpc"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_authorized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = table.AuthorizedView( - name="name_value", - etag="etag_value", - deletion_protection=True, - ) - response = client.get_authorized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.GetAuthorizedViewRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. 
- assert isinstance(response, table.AuthorizedView) - assert response.name == "name_value" - assert response.etag == "etag_value" - assert response.deletion_protection is True - - -def test_get_authorized_view_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_table_admin.GetAuthorizedViewRequest( - name="name_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_authorized_view), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.get_authorized_view(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetAuthorizedViewRequest( - name="name_value", - ) - - -def test_get_authorized_view_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.get_authorized_view in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.get_authorized_view - ] = mock_rpc - request = {} - client.get_authorized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.get_authorized_view(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_get_authorized_view_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.get_authorized_view - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.get_authorized_view - ] = mock_rpc - - request = {} - await client.get_authorized_view(request) - - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 - - await client.get_authorized_view(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_get_authorized_view_async( - transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.GetAuthorizedViewRequest, -): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_authorized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - table.AuthorizedView( - name="name_value", - etag="etag_value", - deletion_protection=True, - ) - ) - response = await client.get_authorized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.GetAuthorizedViewRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, table.AuthorizedView) - assert response.name == "name_value" - assert response.etag == "etag_value" - assert response.deletion_protection is True - - -@pytest.mark.asyncio -async def test_get_authorized_view_async_from_dict(): - await test_get_authorized_view_async(request_type=dict) - - -def test_get_authorized_view_field_headers(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.GetAuthorizedViewRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_authorized_view), "__call__" - ) as call: - call.return_value = table.AuthorizedView() - client.get_authorized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_get_authorized_view_field_headers_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.GetAuthorizedViewRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_authorized_view), "__call__" - ) as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - table.AuthorizedView() - ) - await client.get_authorized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -def test_get_authorized_view_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_authorized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = table.AuthorizedView() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_authorized_view( - name="name_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - - -def test_get_authorized_view_flattened_error(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_authorized_view( - bigtable_table_admin.GetAuthorizedViewRequest(), - name="name_value", - ) - - -@pytest.mark.asyncio -async def test_get_authorized_view_flattened_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_authorized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = table.AuthorizedView() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - table.AuthorizedView() - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_authorized_view( - name="name_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_authorized_view_flattened_error_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_authorized_view( - bigtable_table_admin.GetAuthorizedViewRequest(), - name="name_value", - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.UpdateAuthorizedViewRequest, - dict, - ], -) -def test_update_authorized_view(request_type, transport: str = "grpc"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_authorized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name="operations/spam") - response = client.update_authorized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.UpdateAuthorizedViewRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_update_authorized_view_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_table_admin.UpdateAuthorizedViewRequest() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_authorized_view), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.update_authorized_view(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.UpdateAuthorizedViewRequest() - - -def test_update_authorized_view_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.update_authorized_view - in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.update_authorized_view - ] = mock_rpc - request = {} - client.update_authorized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods call wrapper_fn to build a cached - # client._transport.operations_client instance on first rpc call. 
- # Subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.update_authorized_view(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_update_authorized_view_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.update_authorized_view - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.update_authorized_view - ] = mock_rpc - - request = {} - await client.update_authorized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods call wrapper_fn to build a cached - # client._transport.operations_client instance on first rpc call. - # Subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - await client.update_authorized_view(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_update_authorized_view_async( - transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.UpdateAuthorizedViewRequest, -): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_authorized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - response = await client.update_authorized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.UpdateAuthorizedViewRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_update_authorized_view_async_from_dict(): - await test_update_authorized_view_async(request_type=dict) - - -def test_update_authorized_view_field_headers(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.UpdateAuthorizedViewRequest() - - request.authorized_view.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.update_authorized_view), "__call__" - ) as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.update_authorized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "authorized_view.name=name_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_update_authorized_view_field_headers_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.UpdateAuthorizedViewRequest() - - request.authorized_view.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_authorized_view), "__call__" - ) as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/op") - ) - await client.update_authorized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "authorized_view.name=name_value", - ) in kw["metadata"] - - -def test_update_authorized_view_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_authorized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/op") - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_authorized_view( - authorized_view=table.AuthorizedView(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].authorized_view - mock_val = table.AuthorizedView(name="name_value") - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) - assert arg == mock_val - - -def test_update_authorized_view_flattened_error(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_authorized_view( - bigtable_table_admin.UpdateAuthorizedViewRequest(), - authorized_view=table.AuthorizedView(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - -@pytest.mark.asyncio -async def test_update_authorized_view_flattened_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.update_authorized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/op") - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_authorized_view( - authorized_view=table.AuthorizedView(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].authorized_view - mock_val = table.AuthorizedView(name="name_value") - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_update_authorized_view_flattened_error_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.update_authorized_view( - bigtable_table_admin.UpdateAuthorizedViewRequest(), - authorized_view=table.AuthorizedView(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.DeleteAuthorizedViewRequest, - dict, - ], -) -def test_delete_authorized_view(request_type, transport: str = "grpc"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_authorized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.delete_authorized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.DeleteAuthorizedViewRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert response is None - - -def test_delete_authorized_view_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_table_admin.DeleteAuthorizedViewRequest( - name="name_value", - etag="etag_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_authorized_view), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.delete_authorized_view(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteAuthorizedViewRequest( - name="name_value", - etag="etag_value", - ) - - -def test_delete_authorized_view_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.delete_authorized_view - in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.delete_authorized_view - ] = mock_rpc - request = {} - client.delete_authorized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.delete_authorized_view(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_delete_authorized_view_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.delete_authorized_view - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.delete_authorized_view - ] = mock_rpc - - request = {} - await client.delete_authorized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - await client.delete_authorized_view(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_delete_authorized_view_async( - transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.DeleteAuthorizedViewRequest, -): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_authorized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.delete_authorized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.DeleteAuthorizedViewRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_delete_authorized_view_async_from_dict(): - await test_delete_authorized_view_async(request_type=dict) - - -def test_delete_authorized_view_field_headers(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.DeleteAuthorizedViewRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_authorized_view), "__call__" - ) as call: - call.return_value = None - client.delete_authorized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_delete_authorized_view_field_headers_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.DeleteAuthorizedViewRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_authorized_view), "__call__" - ) as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_authorized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -def test_delete_authorized_view_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_authorized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_authorized_view( - name="name_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - - -def test_delete_authorized_view_flattened_error(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_authorized_view( - bigtable_table_admin.DeleteAuthorizedViewRequest(), - name="name_value", - ) - - -@pytest.mark.asyncio -async def test_delete_authorized_view_flattened_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_authorized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = None - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_authorized_view( - name="name_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_authorized_view_flattened_error_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_authorized_view( - bigtable_table_admin.DeleteAuthorizedViewRequest(), - name="name_value", - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.ModifyColumnFamiliesRequest, - dict, - ], -) -def test__modify_column_families(request_type, transport: str = "grpc"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.modify_column_families), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = table.Table( - name="name_value", - granularity=table.Table.TimestampGranularity.MILLIS, - deletion_protection=True, - ) - response = client._modify_column_families(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.ModifyColumnFamiliesRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, table.Table) - assert response.name == "name_value" - assert response.granularity == table.Table.TimestampGranularity.MILLIS - assert response.deletion_protection is True - - -def test__modify_column_families_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. 
- client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_table_admin.ModifyColumnFamiliesRequest( - name="name_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.modify_column_families), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._modify_column_families(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ModifyColumnFamiliesRequest( - name="name_value", - ) - - -def test__modify_column_families_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.modify_column_families - in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.modify_column_families - ] = mock_rpc - request = {} - client._modify_column_families(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client._modify_column_families(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test__modify_column_families_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.modify_column_families - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.modify_column_families - ] = mock_rpc - - request = {} - await client._modify_column_families(request) - - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 - - await client._modify_column_families(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test__modify_column_families_async( - transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.ModifyColumnFamiliesRequest, -): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.modify_column_families), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - table.Table( - name="name_value", - granularity=table.Table.TimestampGranularity.MILLIS, - deletion_protection=True, - ) - ) - response = await client._modify_column_families(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.ModifyColumnFamiliesRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, table.Table) - assert response.name == "name_value" - assert response.granularity == table.Table.TimestampGranularity.MILLIS - assert response.deletion_protection is True - - -@pytest.mark.asyncio -async def test__modify_column_families_async_from_dict(): - await test__modify_column_families_async(request_type=dict) - - -def test__modify_column_families_field_headers(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.ModifyColumnFamiliesRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.modify_column_families), "__call__" - ) as call: - call.return_value = table.Table() - client._modify_column_families(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test__modify_column_families_field_headers_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.ModifyColumnFamiliesRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.modify_column_families), "__call__" - ) as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Table()) - await client._modify_column_families(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -def test__modify_column_families_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.modify_column_families), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = table.Table() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client._modify_column_families( - name="name_value", - modifications=[ - bigtable_table_admin.ModifyColumnFamiliesRequest.Modification( - id="id_value" - ) - ], - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - arg = args[0].modifications - mock_val = [ - bigtable_table_admin.ModifyColumnFamiliesRequest.Modification(id="id_value") - ] - assert arg == mock_val - - -def test__modify_column_families_flattened_error(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client._modify_column_families( - bigtable_table_admin.ModifyColumnFamiliesRequest(), - name="name_value", - modifications=[ - bigtable_table_admin.ModifyColumnFamiliesRequest.Modification( - id="id_value" - ) - ], - ) - - -@pytest.mark.asyncio -async def test__modify_column_families_flattened_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.modify_column_families), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = table.Table() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Table()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client._modify_column_families( - name="name_value", - modifications=[ - bigtable_table_admin.ModifyColumnFamiliesRequest.Modification( - id="id_value" - ) - ], - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - arg = args[0].modifications - mock_val = [ - bigtable_table_admin.ModifyColumnFamiliesRequest.Modification(id="id_value") - ] - assert arg == mock_val - - -@pytest.mark.asyncio -async def test__modify_column_families_flattened_error_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client._modify_column_families( - bigtable_table_admin.ModifyColumnFamiliesRequest(), - name="name_value", - modifications=[ - bigtable_table_admin.ModifyColumnFamiliesRequest.Modification( - id="id_value" - ) - ], - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.DropRowRangeRequest, - dict, - ], -) -def test_drop_row_range(request_type, transport: str = "grpc"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.drop_row_range(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.DropRowRangeRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert response is None - - -def test_drop_row_range_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_table_admin.DropRowRangeRequest( - name="name_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.drop_row_range(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DropRowRangeRequest( - name="name_value", - ) - - -def test_drop_row_range_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.drop_row_range in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.drop_row_range] = mock_rpc - request = {} - client.drop_row_range(request) - - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 - - client.drop_row_range(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_drop_row_range_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.drop_row_range - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.drop_row_range - ] = mock_rpc - - request = {} - await client.drop_row_range(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - await client.drop_row_range(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_drop_row_range_async( - transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.DropRowRangeRequest, -): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.drop_row_range(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.DropRowRangeRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_drop_row_range_async_from_dict(): - await test_drop_row_range_async(request_type=dict) - - -def test_drop_row_range_field_headers(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.DropRowRangeRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: - call.return_value = None - client.drop_row_range(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_drop_row_range_field_headers_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.DropRowRangeRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.drop_row_range(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.GenerateConsistencyTokenRequest, - dict, - ], -) -def test_generate_consistency_token(request_type, transport: str = "grpc"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.generate_consistency_token), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse( - consistency_token="consistency_token_value", - ) - response = client.generate_consistency_token(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.GenerateConsistencyTokenRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, bigtable_table_admin.GenerateConsistencyTokenResponse) - assert response.consistency_token == "consistency_token_value" - - -def test_generate_consistency_token_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_table_admin.GenerateConsistencyTokenRequest( - name="name_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.generate_consistency_token), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. 
- ) - client.generate_consistency_token(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GenerateConsistencyTokenRequest( - name="name_value", - ) - - -def test_generate_consistency_token_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.generate_consistency_token - in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.generate_consistency_token - ] = mock_rpc - request = {} - client.generate_consistency_token(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.generate_consistency_token(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_generate_consistency_token_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.generate_consistency_token - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.generate_consistency_token - ] = mock_rpc - - request = {} - await client.generate_consistency_token(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - await client.generate_consistency_token(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_generate_consistency_token_async( - transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.GenerateConsistencyTokenRequest, -): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.generate_consistency_token), "__call__" - ) as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.GenerateConsistencyTokenResponse( - consistency_token="consistency_token_value", - ) - ) - response = await client.generate_consistency_token(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.GenerateConsistencyTokenRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, bigtable_table_admin.GenerateConsistencyTokenResponse) - assert response.consistency_token == "consistency_token_value" - - -@pytest.mark.asyncio -async def test_generate_consistency_token_async_from_dict(): - await test_generate_consistency_token_async(request_type=dict) - - -def test_generate_consistency_token_field_headers(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.GenerateConsistencyTokenRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.generate_consistency_token), "__call__" - ) as call: - call.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() - client.generate_consistency_token(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_generate_consistency_token_field_headers_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.GenerateConsistencyTokenRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.generate_consistency_token), "__call__" - ) as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.GenerateConsistencyTokenResponse() - ) - await client.generate_consistency_token(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -def test_generate_consistency_token_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.generate_consistency_token), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- client.generate_consistency_token( - name="name_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - - -def test_generate_consistency_token_flattened_error(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.generate_consistency_token( - bigtable_table_admin.GenerateConsistencyTokenRequest(), - name="name_value", - ) - - -@pytest.mark.asyncio -async def test_generate_consistency_token_flattened_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.generate_consistency_token), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.GenerateConsistencyTokenResponse() - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.generate_consistency_token( - name="name_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_generate_consistency_token_flattened_error_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.generate_consistency_token( - bigtable_table_admin.GenerateConsistencyTokenRequest(), - name="name_value", - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.CheckConsistencyRequest, - dict, - ], -) -def test_check_consistency(request_type, transport: str = "grpc"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.check_consistency), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.CheckConsistencyResponse( - consistent=True, - ) - response = client.check_consistency(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.CheckConsistencyRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. 
- assert isinstance(response, bigtable_table_admin.CheckConsistencyResponse) - assert response.consistent is True - - -def test_check_consistency_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_table_admin.CheckConsistencyRequest( - name="name_value", - consistency_token="consistency_token_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.check_consistency), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.check_consistency(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CheckConsistencyRequest( - name="name_value", - consistency_token="consistency_token_value", - ) - - -def test_check_consistency_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.check_consistency in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.check_consistency - ] = mock_rpc - request = {} - client.check_consistency(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.check_consistency(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_check_consistency_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.check_consistency - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.check_consistency - ] = mock_rpc - - request = {} - await client.check_consistency(request) - - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 - - await client.check_consistency(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_check_consistency_async( - transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.CheckConsistencyRequest, -): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.check_consistency), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.CheckConsistencyResponse( - consistent=True, - ) - ) - response = await client.check_consistency(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.CheckConsistencyRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, bigtable_table_admin.CheckConsistencyResponse) - assert response.consistent is True - - -@pytest.mark.asyncio -async def test_check_consistency_async_from_dict(): - await test_check_consistency_async(request_type=dict) - - -def test_check_consistency_field_headers(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.CheckConsistencyRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.check_consistency), "__call__" - ) as call: - call.return_value = bigtable_table_admin.CheckConsistencyResponse() - client.check_consistency(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_check_consistency_field_headers_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.CheckConsistencyRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.check_consistency), "__call__" - ) as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.CheckConsistencyResponse() - ) - await client.check_consistency(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -def test_check_consistency_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.check_consistency), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.CheckConsistencyResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.check_consistency( - name="name_value", - consistency_token="consistency_token_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - arg = args[0].consistency_token - mock_val = "consistency_token_value" - assert arg == mock_val - - -def test_check_consistency_flattened_error(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.check_consistency( - bigtable_table_admin.CheckConsistencyRequest(), - name="name_value", - consistency_token="consistency_token_value", - ) - - -@pytest.mark.asyncio -async def test_check_consistency_flattened_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.check_consistency), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.CheckConsistencyResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.CheckConsistencyResponse() - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.check_consistency( - name="name_value", - consistency_token="consistency_token_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - arg = args[0].consistency_token - mock_val = "consistency_token_value" - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_check_consistency_flattened_error_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.check_consistency( - bigtable_table_admin.CheckConsistencyRequest(), - name="name_value", - consistency_token="consistency_token_value", - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.SnapshotTableRequest, - dict, - ], -) -def test__snapshot_table(request_type, transport: str = "grpc"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/spam") - response = client._snapshot_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.SnapshotTableRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test__snapshot_table_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_table_admin.SnapshotTableRequest( - name="name_value", - cluster="cluster_value", - snapshot_id="snapshot_id_value", - description="description_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._snapshot_table(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.SnapshotTableRequest( - name="name_value", - cluster="cluster_value", - snapshot_id="snapshot_id_value", - description="description_value", - ) - - -def test__snapshot_table_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.snapshot_table in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.snapshot_table] = mock_rpc - request = {} - client._snapshot_table(request) - - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 - - # Operation methods call wrapper_fn to build a cached - # client._transport.operations_client instance on first rpc call. - # Subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client._snapshot_table(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test__snapshot_table_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.snapshot_table - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.snapshot_table - ] = mock_rpc - - request = {} - await client._snapshot_table(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods call wrapper_fn to build a cached - # client._transport.operations_client instance on first rpc call. - # Subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - await client._snapshot_table(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test__snapshot_table_async( - transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.SnapshotTableRequest, -): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - response = await client._snapshot_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.SnapshotTableRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test__snapshot_table_async_from_dict(): - await test__snapshot_table_async(request_type=dict) - - -def test__snapshot_table_field_headers(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = bigtable_table_admin.SnapshotTableRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client._snapshot_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test__snapshot_table_field_headers_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.SnapshotTableRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/op") - ) - await client._snapshot_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -def test__snapshot_table_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/op") - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client._snapshot_table( - name="name_value", - cluster="cluster_value", - snapshot_id="snapshot_id_value", - description="description_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - arg = args[0].cluster - mock_val = "cluster_value" - assert arg == mock_val - arg = args[0].snapshot_id - mock_val = "snapshot_id_value" - assert arg == mock_val - arg = args[0].description - mock_val = "description_value" - assert arg == mock_val - - -def test__snapshot_table_flattened_error(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client._snapshot_table( - bigtable_table_admin.SnapshotTableRequest(), - name="name_value", - cluster="cluster_value", - snapshot_id="snapshot_id_value", - description="description_value", - ) - - -@pytest.mark.asyncio -async def test__snapshot_table_flattened_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/op") - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client._snapshot_table( - name="name_value", - cluster="cluster_value", - snapshot_id="snapshot_id_value", - description="description_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - arg = args[0].cluster - mock_val = "cluster_value" - assert arg == mock_val - arg = args[0].snapshot_id - mock_val = "snapshot_id_value" - assert arg == mock_val - arg = args[0].description - mock_val = "description_value" - assert arg == mock_val - - -@pytest.mark.asyncio -async def test__snapshot_table_flattened_error_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client._snapshot_table( - bigtable_table_admin.SnapshotTableRequest(), - name="name_value", - cluster="cluster_value", - snapshot_id="snapshot_id_value", - description="description_value", - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.GetSnapshotRequest, - dict, - ], -) -def test__get_snapshot(request_type, transport: str = "grpc"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = table.Snapshot( - name="name_value", - data_size_bytes=1594, - state=table.Snapshot.State.READY, - description="description_value", - ) - response = client._get_snapshot(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.GetSnapshotRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. 
- assert isinstance(response, table.Snapshot) - assert response.name == "name_value" - assert response.data_size_bytes == 1594 - assert response.state == table.Snapshot.State.READY - assert response.description == "description_value" - - -def test__get_snapshot_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_table_admin.GetSnapshotRequest( - name="name_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._get_snapshot(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetSnapshotRequest( - name="name_value", - ) - - -def test__get_snapshot_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.get_snapshot in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.get_snapshot] = mock_rpc - request = {} - client._get_snapshot(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client._get_snapshot(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test__get_snapshot_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.get_snapshot - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.get_snapshot - ] = mock_rpc - - request = {} - await client._get_snapshot(request) - - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 - - await client._get_snapshot(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test__get_snapshot_async( - transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.GetSnapshotRequest, -): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - table.Snapshot( - name="name_value", - data_size_bytes=1594, - state=table.Snapshot.State.READY, - description="description_value", - ) - ) - response = await client._get_snapshot(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.GetSnapshotRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, table.Snapshot) - assert response.name == "name_value" - assert response.data_size_bytes == 1594 - assert response.state == table.Snapshot.State.READY - assert response.description == "description_value" - - -@pytest.mark.asyncio -async def test__get_snapshot_async_from_dict(): - await test__get_snapshot_async(request_type=dict) - - -def test__get_snapshot_field_headers(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.GetSnapshotRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: - call.return_value = table.Snapshot() - client._get_snapshot(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test__get_snapshot_field_headers_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.GetSnapshotRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Snapshot()) - await client._get_snapshot(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -def test__get_snapshot_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = table.Snapshot() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client._get_snapshot( - name="name_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - - -def test__get_snapshot_flattened_error(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client._get_snapshot( - bigtable_table_admin.GetSnapshotRequest(), - name="name_value", - ) - - -@pytest.mark.asyncio -async def test__get_snapshot_flattened_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = table.Snapshot() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Snapshot()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client._get_snapshot( - name="name_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - - -@pytest.mark.asyncio -async def test__get_snapshot_flattened_error_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client._get_snapshot( - bigtable_table_admin.GetSnapshotRequest(), - name="name_value", - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.ListSnapshotsRequest, - dict, - ], -) -def test__list_snapshots(request_type, transport: str = "grpc"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.ListSnapshotsResponse( - next_page_token="next_page_token_value", - ) - response = client._list_snapshots(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.ListSnapshotsRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListSnapshotsPager) - assert response.next_page_token == "next_page_token_value" - - -def test__list_snapshots_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_table_admin.ListSnapshotsRequest( - parent="parent_value", - page_token="page_token_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._list_snapshots(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListSnapshotsRequest( - parent="parent_value", - page_token="page_token_value", - ) - - -def test__list_snapshots_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.list_snapshots in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.list_snapshots] = mock_rpc - request = {} - client._list_snapshots(request) - - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 - - client._list_snapshots(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test__list_snapshots_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.list_snapshots - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.list_snapshots - ] = mock_rpc - - request = {} - await client._list_snapshots(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - await client._list_snapshots(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test__list_snapshots_async( - transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.ListSnapshotsRequest, -): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.ListSnapshotsResponse( - next_page_token="next_page_token_value", - ) - ) - response = await client._list_snapshots(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.ListSnapshotsRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListSnapshotsAsyncPager) - assert response.next_page_token == "next_page_token_value" - - -@pytest.mark.asyncio -async def test__list_snapshots_async_from_dict(): - await test__list_snapshots_async(request_type=dict) - - -def test__list_snapshots_field_headers(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.ListSnapshotsRequest() - - request.parent = "parent_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: - call.return_value = bigtable_table_admin.ListSnapshotsResponse() - client._list_snapshots(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test__list_snapshots_field_headers_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.ListSnapshotsRequest() - - request.parent = "parent_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.ListSnapshotsResponse() - ) - await client._list_snapshots(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] - - -def test__list_snapshots_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.ListSnapshotsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client._list_snapshots( - parent="parent_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - - -def test__list_snapshots_flattened_error(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client._list_snapshots( - bigtable_table_admin.ListSnapshotsRequest(), - parent="parent_value", - ) - - -@pytest.mark.asyncio -async def test__list_snapshots_flattened_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.ListSnapshotsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.ListSnapshotsResponse() - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client._list_snapshots( - parent="parent_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - - -@pytest.mark.asyncio -async def test__list_snapshots_flattened_error_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client._list_snapshots( - bigtable_table_admin.ListSnapshotsRequest(), - parent="parent_value", - ) - - -def test__list_snapshots_pager(transport_name: str = "grpc"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport_name, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: - # Set the response to a series of pages. - call.side_effect = ( - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - table.Snapshot(), - table.Snapshot(), - ], - next_page_token="abc", - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[], - next_page_token="def", - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - ], - next_page_token="ghi", - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - table.Snapshot(), - ], - ), - RuntimeError, - ) - - expected_metadata = () - retry = retries.Retry() - timeout = 5 - expected_metadata = tuple(expected_metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), - ) - pager = client._list_snapshots(request={}, retry=retry, timeout=timeout) - - assert pager._metadata == expected_metadata - assert pager._retry == retry - assert pager._timeout == timeout - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, table.Snapshot) for i in results) - - -def test__list_snapshots_pages(transport_name: str = "grpc"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport_name, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: - # Set the response to a series of pages. - call.side_effect = ( - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - table.Snapshot(), - table.Snapshot(), - ], - next_page_token="abc", - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[], - next_page_token="def", - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - ], - next_page_token="ghi", - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - table.Snapshot(), - ], - ), - RuntimeError, - ) - pages = list(client._list_snapshots(request={}).pages) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token - - -@pytest.mark.asyncio -async def test__list_snapshots_async_pager(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_snapshots), "__call__", new_callable=mock.AsyncMock - ) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - table.Snapshot(), - table.Snapshot(), - ], - next_page_token="abc", - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[], - next_page_token="def", - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - ], - next_page_token="ghi", - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - table.Snapshot(), - ], - ), - RuntimeError, - ) - async_pager = await client._list_snapshots( - request={}, - ) - assert async_pager.next_page_token == "abc" - responses = [] - async for response in async_pager: # pragma: no branch - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, table.Snapshot) for i in responses) - - -@pytest.mark.asyncio -async def test__list_snapshots_async_pages(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_snapshots), "__call__", new_callable=mock.AsyncMock - ) as call: - # Set the response to a series of pages. - call.side_effect = ( - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - table.Snapshot(), - table.Snapshot(), - ], - next_page_token="abc", - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[], - next_page_token="def", - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - ], - next_page_token="ghi", - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - table.Snapshot(), - ], - ), - RuntimeError, - ) - pages = [] - # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` - # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 - async for page_ in ( # pragma: no branch - await client._list_snapshots(request={}) - ).pages: - pages.append(page_) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.DeleteSnapshotRequest, - dict, - ], -) -def test__delete_snapshot(request_type, transport: str = "grpc"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client._delete_snapshot(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.DeleteSnapshotRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert response is None - - -def test__delete_snapshot_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. 
- client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_table_admin.DeleteSnapshotRequest( - name="name_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._delete_snapshot(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteSnapshotRequest( - name="name_value", - ) - - -def test__delete_snapshot_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.delete_snapshot in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.delete_snapshot] = mock_rpc - request = {} - client._delete_snapshot(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client._delete_snapshot(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test__delete_snapshot_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.delete_snapshot - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.delete_snapshot - ] = mock_rpc - - request = {} - await client._delete_snapshot(request) - - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 - - await client._delete_snapshot(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test__delete_snapshot_async( - transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.DeleteSnapshotRequest, -): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client._delete_snapshot(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.DeleteSnapshotRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test__delete_snapshot_async_from_dict(): - await test__delete_snapshot_async(request_type=dict) - - -def test__delete_snapshot_field_headers(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.DeleteSnapshotRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: - call.return_value = None - client._delete_snapshot(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test__delete_snapshot_field_headers_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.DeleteSnapshotRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client._delete_snapshot(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -def test__delete_snapshot_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client._delete_snapshot( - name="name_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - - -def test__delete_snapshot_flattened_error(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client._delete_snapshot( - bigtable_table_admin.DeleteSnapshotRequest(), - name="name_value", - ) - - -@pytest.mark.asyncio -async def test__delete_snapshot_flattened_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = None - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client._delete_snapshot( - name="name_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - - -@pytest.mark.asyncio -async def test__delete_snapshot_flattened_error_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client._delete_snapshot( - bigtable_table_admin.DeleteSnapshotRequest(), - name="name_value", - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.CreateBackupRequest, - dict, - ], -) -def test_create_backup(request_type, transport: str = "grpc"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.create_backup), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/spam") - response = client.create_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.CreateBackupRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -def test_create_backup_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_table_admin.CreateBackupRequest( - parent="parent_value", - backup_id="backup_id_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.create_backup), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.create_backup(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CreateBackupRequest( - parent="parent_value", - backup_id="backup_id_value", - ) - - -def test_create_backup_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.create_backup in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.create_backup] = mock_rpc - request = {} - client.create_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods call wrapper_fn to build a cached - # client._transport.operations_client instance on first rpc call. 
- # Subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.create_backup(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_create_backup_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.create_backup - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.create_backup - ] = mock_rpc - - request = {} - await client.create_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods call wrapper_fn to build a cached - # client._transport.operations_client instance on first rpc call. - # Subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - await client.create_backup(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_create_backup_async( - transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.CreateBackupRequest, -): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.create_backup), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - response = await client.create_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.CreateBackupRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_backup_async_from_dict(): - await test_create_backup_async(request_type=dict) - - -def test_create_backup_field_headers(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.CreateBackupRequest() - - request.parent = "parent_value" - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(type(client.transport.create_backup), "__call__") as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.create_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_create_backup_field_headers_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.CreateBackupRequest() - - request.parent = "parent_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.create_backup), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/op") - ) - await client.create_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] - - -def test_create_backup_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.create_backup), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/op") - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_backup( - parent="parent_value", - backup_id="backup_id_value", - backup=table.Backup(name="name_value"), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - arg = args[0].backup_id - mock_val = "backup_id_value" - assert arg == mock_val - arg = args[0].backup - mock_val = table.Backup(name="name_value") - assert arg == mock_val - - -def test_create_backup_flattened_error(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_backup( - bigtable_table_admin.CreateBackupRequest(), - parent="parent_value", - backup_id="backup_id_value", - backup=table.Backup(name="name_value"), - ) - - -@pytest.mark.asyncio -async def test_create_backup_flattened_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.create_backup), "__call__") as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name="operations/op") - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_backup( - parent="parent_value", - backup_id="backup_id_value", - backup=table.Backup(name="name_value"), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - arg = args[0].backup_id - mock_val = "backup_id_value" - assert arg == mock_val - arg = args[0].backup - mock_val = table.Backup(name="name_value") - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_backup_flattened_error_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_backup( - bigtable_table_admin.CreateBackupRequest(), - parent="parent_value", - backup_id="backup_id_value", - backup=table.Backup(name="name_value"), - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.GetBackupRequest, - dict, - ], -) -def test_get_backup(request_type, transport: str = "grpc"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_backup), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = table.Backup( - name="name_value", - source_table="source_table_value", - source_backup="source_backup_value", - size_bytes=1089, - state=table.Backup.State.CREATING, - backup_type=table.Backup.BackupType.STANDARD, - ) - response = client.get_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.GetBackupRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, table.Backup) - assert response.name == "name_value" - assert response.source_table == "source_table_value" - assert response.source_backup == "source_backup_value" - assert response.size_bytes == 1089 - assert response.state == table.Backup.State.CREATING - assert response.backup_type == table.Backup.BackupType.STANDARD - - -def test_get_backup_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. 
- request = bigtable_table_admin.GetBackupRequest( - name="name_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_backup), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.get_backup(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetBackupRequest( - name="name_value", - ) - - -def test_get_backup_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.get_backup in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.get_backup] = mock_rpc - request = {} - client.get_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.get_backup(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_get_backup_async_use_cached_wrapped_rpc(transport: str = "grpc_asyncio"): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.get_backup - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.get_backup - ] = mock_rpc - - request = {} - await client.get_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - await client.get_backup(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_get_backup_async( - transport: str = "grpc_asyncio", request_type=bigtable_table_admin.GetBackupRequest -): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(type(client.transport.get_backup), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - table.Backup( - name="name_value", - source_table="source_table_value", - source_backup="source_backup_value", - size_bytes=1089, - state=table.Backup.State.CREATING, - backup_type=table.Backup.BackupType.STANDARD, - ) - ) - response = await client.get_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.GetBackupRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, table.Backup) - assert response.name == "name_value" - assert response.source_table == "source_table_value" - assert response.source_backup == "source_backup_value" - assert response.size_bytes == 1089 - assert response.state == table.Backup.State.CREATING - assert response.backup_type == table.Backup.BackupType.STANDARD - - -@pytest.mark.asyncio -async def test_get_backup_async_from_dict(): - await test_get_backup_async(request_type=dict) - - -def test_get_backup_field_headers(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.GetBackupRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_backup), "__call__") as call: - call.return_value = table.Backup() - client.get_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_get_backup_field_headers_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.GetBackupRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_backup), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Backup()) - await client.get_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -def test_get_backup_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_backup), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = table.Backup() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- client.get_backup( - name="name_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - - -def test_get_backup_flattened_error(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_backup( - bigtable_table_admin.GetBackupRequest(), - name="name_value", - ) - - -@pytest.mark.asyncio -async def test_get_backup_flattened_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_backup), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = table.Backup() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Backup()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_backup( - name="name_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_backup_flattened_error_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_backup( - bigtable_table_admin.GetBackupRequest(), - name="name_value", - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.UpdateBackupRequest, - dict, - ], -) -def test_update_backup(request_type, transport: str = "grpc"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_backup), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = table.Backup( - name="name_value", - source_table="source_table_value", - source_backup="source_backup_value", - size_bytes=1089, - state=table.Backup.State.CREATING, - backup_type=table.Backup.BackupType.STANDARD, - ) - response = client.update_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.UpdateBackupRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. 
- assert isinstance(response, table.Backup) - assert response.name == "name_value" - assert response.source_table == "source_table_value" - assert response.source_backup == "source_backup_value" - assert response.size_bytes == 1089 - assert response.state == table.Backup.State.CREATING - assert response.backup_type == table.Backup.BackupType.STANDARD - - -def test_update_backup_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_table_admin.UpdateBackupRequest() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_backup), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.update_backup(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.UpdateBackupRequest() - - -def test_update_backup_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.update_backup in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.update_backup] = mock_rpc - request = {} - client.update_backup(request) - - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 - - client.update_backup(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_update_backup_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.update_backup - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.update_backup - ] = mock_rpc - - request = {} - await client.update_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - await client.update_backup(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_update_backup_async( - transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.UpdateBackupRequest, -): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_backup), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - table.Backup( - name="name_value", - source_table="source_table_value", - source_backup="source_backup_value", - size_bytes=1089, - state=table.Backup.State.CREATING, - backup_type=table.Backup.BackupType.STANDARD, - ) - ) - response = await client.update_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.UpdateBackupRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, table.Backup) - assert response.name == "name_value" - assert response.source_table == "source_table_value" - assert response.source_backup == "source_backup_value" - assert response.size_bytes == 1089 - assert response.state == table.Backup.State.CREATING - assert response.backup_type == table.Backup.BackupType.STANDARD - - -@pytest.mark.asyncio -async def test_update_backup_async_from_dict(): - await test_update_backup_async(request_type=dict) - - -def test_update_backup_field_headers(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = bigtable_table_admin.UpdateBackupRequest() - - request.backup.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_backup), "__call__") as call: - call.return_value = table.Backup() - client.update_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "backup.name=name_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_update_backup_field_headers_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.UpdateBackupRequest() - - request.backup.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_backup), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Backup()) - await client.update_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "backup.name=name_value", - ) in kw["metadata"] - - -def test_update_backup_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_backup), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = table.Backup() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_backup( - backup=table.Backup(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].backup - mock_val = table.Backup(name="name_value") - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) - assert arg == mock_val - - -def test_update_backup_flattened_error(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_backup( - bigtable_table_admin.UpdateBackupRequest(), - backup=table.Backup(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - -@pytest.mark.asyncio -async def test_update_backup_flattened_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_backup), "__call__") as call: - # Designate an appropriate return value for the call. 
- call.return_value = table.Backup() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Backup()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_backup( - backup=table.Backup(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].backup - mock_val = table.Backup(name="name_value") - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_update_backup_flattened_error_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.update_backup( - bigtable_table_admin.UpdateBackupRequest(), - backup=table.Backup(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.DeleteBackupRequest, - dict, - ], -) -def test_delete_backup(request_type, transport: str = "grpc"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.delete_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.DeleteBackupRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert response is None - - -def test_delete_backup_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_table_admin.DeleteBackupRequest( - name="name_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. 
- ) - client.delete_backup(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteBackupRequest( - name="name_value", - ) - - -def test_delete_backup_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.delete_backup in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.delete_backup] = mock_rpc - request = {} - client.delete_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.delete_backup(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_delete_backup_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.delete_backup - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.delete_backup - ] = mock_rpc - - request = {} - await client.delete_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - await client.delete_backup(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_delete_backup_async( - transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.DeleteBackupRequest, -): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.delete_backup(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.DeleteBackupRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_delete_backup_async_from_dict(): - await test_delete_backup_async(request_type=dict) - - -def test_delete_backup_field_headers(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.DeleteBackupRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: - call.return_value = None - client.delete_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_delete_backup_field_headers_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.DeleteBackupRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -def test_delete_backup_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_backup( - name="name_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - - -def test_delete_backup_flattened_error(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.delete_backup( - bigtable_table_admin.DeleteBackupRequest(), - name="name_value", - ) - - -@pytest.mark.asyncio -async def test_delete_backup_flattened_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = None - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_backup( - name="name_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_backup_flattened_error_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_backup( - bigtable_table_admin.DeleteBackupRequest(), - name="name_value", - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.ListBackupsRequest, - dict, - ], -) -def test_list_backups(request_type, transport: str = "grpc"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_backups), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.ListBackupsResponse( - next_page_token="next_page_token_value", - ) - response = client.list_backups(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.ListBackupsRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListBackupsPager) - assert response.next_page_token == "next_page_token_value" - - -def test_list_backups_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_table_admin.ListBackupsRequest( - parent="parent_value", - filter="filter_value", - order_by="order_by_value", - page_token="page_token_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(type(client.transport.list_backups), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.list_backups(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListBackupsRequest( - parent="parent_value", - filter="filter_value", - order_by="order_by_value", - page_token="page_token_value", - ) - - -def test_list_backups_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.list_backups in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.list_backups] = mock_rpc - request = {} - client.list_backups(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.list_backups(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_list_backups_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.list_backups - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.list_backups - ] = mock_rpc - - request = {} - await client.list_backups(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - await client.list_backups(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_list_backups_async( - transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.ListBackupsRequest, -): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_backups), "__call__") as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.ListBackupsResponse( - next_page_token="next_page_token_value", - ) - ) - response = await client.list_backups(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.ListBackupsRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListBackupsAsyncPager) - assert response.next_page_token == "next_page_token_value" - - -@pytest.mark.asyncio -async def test_list_backups_async_from_dict(): - await test_list_backups_async(request_type=dict) - - -def test_list_backups_field_headers(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.ListBackupsRequest() - - request.parent = "parent_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_backups), "__call__") as call: - call.return_value = bigtable_table_admin.ListBackupsResponse() - client.list_backups(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_list_backups_field_headers_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.ListBackupsRequest() - - request.parent = "parent_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_backups), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.ListBackupsResponse() - ) - await client.list_backups(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] - - -def test_list_backups_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_backups), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.ListBackupsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_backups( - parent="parent_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - - -def test_list_backups_flattened_error(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_backups( - bigtable_table_admin.ListBackupsRequest(), - parent="parent_value", - ) - - -@pytest.mark.asyncio -async def test_list_backups_flattened_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_backups), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.ListBackupsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.ListBackupsResponse() - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_backups( - parent="parent_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_backups_flattened_error_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_backups( - bigtable_table_admin.ListBackupsRequest(), - parent="parent_value", - ) - - -def test_list_backups_pager(transport_name: str = "grpc"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport_name, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_backups), "__call__") as call: - # Set the response to a series of pages. - call.side_effect = ( - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - table.Backup(), - table.Backup(), - ], - next_page_token="abc", - ), - bigtable_table_admin.ListBackupsResponse( - backups=[], - next_page_token="def", - ), - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - ], - next_page_token="ghi", - ), - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - table.Backup(), - ], - ), - RuntimeError, - ) - - expected_metadata = () - retry = retries.Retry() - timeout = 5 - expected_metadata = tuple(expected_metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), - ) - pager = client.list_backups(request={}, retry=retry, timeout=timeout) - - assert pager._metadata == expected_metadata - assert pager._retry == retry - assert pager._timeout == timeout - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, table.Backup) for i in results) - - -def test_list_backups_pages(transport_name: str = "grpc"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport_name, - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(type(client.transport.list_backups), "__call__") as call: - # Set the response to a series of pages. - call.side_effect = ( - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - table.Backup(), - table.Backup(), - ], - next_page_token="abc", - ), - bigtable_table_admin.ListBackupsResponse( - backups=[], - next_page_token="def", - ), - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - ], - next_page_token="ghi", - ), - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - table.Backup(), - ], - ), - RuntimeError, - ) - pages = list(client.list_backups(request={}).pages) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token - - -@pytest.mark.asyncio -async def test_list_backups_async_pager(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_backups), "__call__", new_callable=mock.AsyncMock - ) as call: - # Set the response to a series of pages. - call.side_effect = ( - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - table.Backup(), - table.Backup(), - ], - next_page_token="abc", - ), - bigtable_table_admin.ListBackupsResponse( - backups=[], - next_page_token="def", - ), - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - ], - next_page_token="ghi", - ), - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - table.Backup(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_backups( - request={}, - ) - assert async_pager.next_page_token == "abc" - responses = [] - async for response in async_pager: # pragma: no branch - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, table.Backup) for i in responses) - - -@pytest.mark.asyncio -async def test_list_backups_async_pages(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_backups), "__call__", new_callable=mock.AsyncMock - ) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - table.Backup(), - table.Backup(), - ], - next_page_token="abc", - ), - bigtable_table_admin.ListBackupsResponse( - backups=[], - next_page_token="def", - ), - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - ], - next_page_token="ghi", - ), - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - table.Backup(), - ], - ), - RuntimeError, - ) - pages = [] - # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` - # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 - async for page_ in ( # pragma: no branch - await client.list_backups(request={}) - ).pages: - pages.append(page_) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.RestoreTableRequest, - dict, - ], -) -def test__restore_table(request_type, transport: str = "grpc"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.restore_table), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/spam") - response = client._restore_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.RestoreTableRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test__restore_table_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_table_admin.RestoreTableRequest( - parent="parent_value", - table_id="table_id_value", - backup="backup_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.restore_table), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. 
- ) - client._restore_table(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.RestoreTableRequest( - parent="parent_value", - table_id="table_id_value", - backup="backup_value", - ) - - -def test__restore_table_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.restore_table in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.restore_table] = mock_rpc - request = {} - client._restore_table(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods call wrapper_fn to build a cached - # client._transport.operations_client instance on first rpc call. - # Subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client._restore_table(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test__restore_table_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.restore_table - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.restore_table - ] = mock_rpc - - request = {} - await client._restore_table(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods call wrapper_fn to build a cached - # client._transport.operations_client instance on first rpc call. - # Subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - await client._restore_table(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test__restore_table_async( - transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.RestoreTableRequest, -): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
- request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.restore_table), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - response = await client._restore_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.RestoreTableRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test__restore_table_async_from_dict(): - await test__restore_table_async(request_type=dict) - - -def test__restore_table_field_headers(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.RestoreTableRequest() - - request.parent = "parent_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.restore_table), "__call__") as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client._restore_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test__restore_table_field_headers_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.RestoreTableRequest() - - request.parent = "parent_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.restore_table), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/op") - ) - await client._restore_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.CopyBackupRequest, - dict, - ], -) -def test_copy_backup(request_type, transport: str = "grpc"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name="operations/spam") - response = client.copy_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.CopyBackupRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_copy_backup_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_table_admin.CopyBackupRequest( - parent="parent_value", - backup_id="backup_id_value", - source_backup="source_backup_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.copy_backup(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CopyBackupRequest( - parent="parent_value", - backup_id="backup_id_value", - source_backup="source_backup_value", - ) - - -def test_copy_backup_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.copy_backup in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.copy_backup] = mock_rpc - request = {} - client.copy_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods call wrapper_fn to build a cached - # client._transport.operations_client instance on first rpc call. 
- # Subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.copy_backup(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_copy_backup_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.copy_backup - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.copy_backup - ] = mock_rpc - - request = {} - await client.copy_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods call wrapper_fn to build a cached - # client._transport.operations_client instance on first rpc call. - # Subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - await client.copy_backup(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_copy_backup_async( - transport: str = "grpc_asyncio", request_type=bigtable_table_admin.CopyBackupRequest -): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - response = await client.copy_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.CopyBackupRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_copy_backup_async_from_dict(): - await test_copy_backup_async(request_type=dict) - - -def test_copy_backup_field_headers(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.CopyBackupRequest() - - request.parent = "parent_value" - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.copy_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_copy_backup_field_headers_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.CopyBackupRequest() - - request.parent = "parent_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/op") - ) - await client.copy_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] - - -def test_copy_backup_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/op") - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.copy_backup( - parent="parent_value", - backup_id="backup_id_value", - source_backup="source_backup_value", - expire_time=timestamp_pb2.Timestamp(seconds=751), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - arg = args[0].backup_id - mock_val = "backup_id_value" - assert arg == mock_val - arg = args[0].source_backup - mock_val = "source_backup_value" - assert arg == mock_val - assert TimestampRule().to_proto(args[0].expire_time) == timestamp_pb2.Timestamp( - seconds=751 - ) - - -def test_copy_backup_flattened_error(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.copy_backup( - bigtable_table_admin.CopyBackupRequest(), - parent="parent_value", - backup_id="backup_id_value", - source_backup="source_backup_value", - expire_time=timestamp_pb2.Timestamp(seconds=751), - ) - - -@pytest.mark.asyncio -async def test_copy_backup_flattened_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/op") - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.copy_backup( - parent="parent_value", - backup_id="backup_id_value", - source_backup="source_backup_value", - expire_time=timestamp_pb2.Timestamp(seconds=751), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - arg = args[0].backup_id - mock_val = "backup_id_value" - assert arg == mock_val - arg = args[0].source_backup - mock_val = "source_backup_value" - assert arg == mock_val - assert TimestampRule().to_proto(args[0].expire_time) == timestamp_pb2.Timestamp( - seconds=751 - ) - - -@pytest.mark.asyncio -async def test_copy_backup_flattened_error_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.copy_backup( - bigtable_table_admin.CopyBackupRequest(), - parent="parent_value", - backup_id="backup_id_value", - source_backup="source_backup_value", - expire_time=timestamp_pb2.Timestamp(seconds=751), - ) - - -@pytest.mark.parametrize( - "request_type", - [ - iam_policy_pb2.GetIamPolicyRequest, - dict, - ], -) -def test_get_iam_policy(request_type, transport: str = "grpc"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = policy_pb2.Policy( - version=774, - etag=b"etag_blob", - ) - response = client.get_iam_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = iam_policy_pb2.GetIamPolicyRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, policy_pb2.Policy) - assert response.version == 774 - assert response.etag == b"etag_blob" - - -def test_get_iam_policy_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = iam_policy_pb2.GetIamPolicyRequest( - resource="resource_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.get_iam_policy(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.GetIamPolicyRequest( - resource="resource_value", - ) - - -def test_get_iam_policy_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.get_iam_policy in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.get_iam_policy] = mock_rpc - request = {} - client.get_iam_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.get_iam_policy(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_get_iam_policy_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.get_iam_policy - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.get_iam_policy - ] = mock_rpc - - request = {} - await client.get_iam_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - await client.get_iam_policy(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_get_iam_policy_async( - transport: str = "grpc_asyncio", request_type=iam_policy_pb2.GetIamPolicyRequest -): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - policy_pb2.Policy( - version=774, - etag=b"etag_blob", - ) - ) - response = await client.get_iam_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = iam_policy_pb2.GetIamPolicyRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, policy_pb2.Policy) - assert response.version == 774 - assert response.etag == b"etag_blob" - - -@pytest.mark.asyncio -async def test_get_iam_policy_async_from_dict(): - await test_get_iam_policy_async(request_type=dict) - - -def test_get_iam_policy_field_headers(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = iam_policy_pb2.GetIamPolicyRequest() - - request.resource = "resource_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: - call.return_value = policy_pb2.Policy() - client.get_iam_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "resource=resource_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_get_iam_policy_field_headers_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = iam_policy_pb2.GetIamPolicyRequest() - - request.resource = "resource_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) - await client.get_iam_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "resource=resource_value", - ) in kw["metadata"] - - -def test_get_iam_policy_from_dict_foreign(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = policy_pb2.Policy() - response = client.get_iam_policy( - request={ - "resource": "resource_value", - "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), - } - ) - call.assert_called() - - -def test_get_iam_policy_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: - # Designate an appropriate return value for the call. 
- call.return_value = policy_pb2.Policy() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_iam_policy( - resource="resource_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].resource - mock_val = "resource_value" - assert arg == mock_val - - -def test_get_iam_policy_flattened_error(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_iam_policy( - iam_policy_pb2.GetIamPolicyRequest(), - resource="resource_value", - ) - - -@pytest.mark.asyncio -async def test_get_iam_policy_flattened_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = policy_pb2.Policy() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_iam_policy( - resource="resource_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].resource - mock_val = "resource_value" - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_iam_policy_flattened_error_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_iam_policy( - iam_policy_pb2.GetIamPolicyRequest(), - resource="resource_value", - ) - - -@pytest.mark.parametrize( - "request_type", - [ - iam_policy_pb2.SetIamPolicyRequest, - dict, - ], -) -def test_set_iam_policy(request_type, transport: str = "grpc"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = policy_pb2.Policy( - version=774, - etag=b"etag_blob", - ) - response = client.set_iam_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = iam_policy_pb2.SetIamPolicyRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. 
- assert isinstance(response, policy_pb2.Policy) - assert response.version == 774 - assert response.etag == b"etag_blob" - - -def test_set_iam_policy_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = iam_policy_pb2.SetIamPolicyRequest( - resource="resource_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.set_iam_policy(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.SetIamPolicyRequest( - resource="resource_value", - ) - - -def test_set_iam_policy_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.set_iam_policy in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.set_iam_policy] = mock_rpc - request = {} - client.set_iam_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.set_iam_policy(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_set_iam_policy_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.set_iam_policy - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.set_iam_policy - ] = mock_rpc - - request = {} - await client.set_iam_policy(request) - - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 - - await client.set_iam_policy(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_set_iam_policy_async( - transport: str = "grpc_asyncio", request_type=iam_policy_pb2.SetIamPolicyRequest -): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - policy_pb2.Policy( - version=774, - etag=b"etag_blob", - ) - ) - response = await client.set_iam_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = iam_policy_pb2.SetIamPolicyRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, policy_pb2.Policy) - assert response.version == 774 - assert response.etag == b"etag_blob" - - -@pytest.mark.asyncio -async def test_set_iam_policy_async_from_dict(): - await test_set_iam_policy_async(request_type=dict) - - -def test_set_iam_policy_field_headers(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = iam_policy_pb2.SetIamPolicyRequest() - - request.resource = "resource_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: - call.return_value = policy_pb2.Policy() - client.set_iam_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "resource=resource_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_set_iam_policy_field_headers_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = iam_policy_pb2.SetIamPolicyRequest() - - request.resource = "resource_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) - await client.set_iam_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "resource=resource_value", - ) in kw["metadata"] - - -def test_set_iam_policy_from_dict_foreign(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = policy_pb2.Policy() - response = client.set_iam_policy( - request={ - "resource": "resource_value", - "policy": policy_pb2.Policy(version=774), - "update_mask": field_mask_pb2.FieldMask(paths=["paths_value"]), - } - ) - call.assert_called() - - -def test_set_iam_policy_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = policy_pb2.Policy() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.set_iam_policy( - resource="resource_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].resource - mock_val = "resource_value" - assert arg == mock_val - - -def test_set_iam_policy_flattened_error(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.set_iam_policy( - iam_policy_pb2.SetIamPolicyRequest(), - resource="resource_value", - ) - - -@pytest.mark.asyncio -async def test_set_iam_policy_flattened_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = policy_pb2.Policy() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.set_iam_policy( - resource="resource_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].resource - mock_val = "resource_value" - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_set_iam_policy_flattened_error_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.set_iam_policy( - iam_policy_pb2.SetIamPolicyRequest(), - resource="resource_value", - ) - - -@pytest.mark.parametrize( - "request_type", - [ - iam_policy_pb2.TestIamPermissionsRequest, - dict, - ], -) -def test_test_iam_permissions(request_type, transport: str = "grpc"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = iam_policy_pb2.TestIamPermissionsResponse( - permissions=["permissions_value"], - ) - response = client.test_iam_permissions(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = iam_policy_pb2.TestIamPermissionsRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) - assert response.permissions == ["permissions_value"] - - -def test_test_iam_permissions_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = iam_policy_pb2.TestIamPermissionsRequest( - resource="resource_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.test_iam_permissions(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.TestIamPermissionsRequest( - resource="resource_value", - ) - - -def test_test_iam_permissions_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.test_iam_permissions in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.test_iam_permissions - ] = mock_rpc - request = {} - client.test_iam_permissions(request) - - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 - - client.test_iam_permissions(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_test_iam_permissions_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.test_iam_permissions - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.test_iam_permissions - ] = mock_rpc - - request = {} - await client.test_iam_permissions(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - await client.test_iam_permissions(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test_test_iam_permissions_async( - transport: str = "grpc_asyncio", - request_type=iam_policy_pb2.TestIamPermissionsRequest, -): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - iam_policy_pb2.TestIamPermissionsResponse( - permissions=["permissions_value"], - ) - ) - response = await client.test_iam_permissions(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = iam_policy_pb2.TestIamPermissionsRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) - assert response.permissions == ["permissions_value"] - - -@pytest.mark.asyncio -async def test_test_iam_permissions_async_from_dict(): - await test_test_iam_permissions_async(request_type=dict) - - -def test_test_iam_permissions_field_headers(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = iam_policy_pb2.TestIamPermissionsRequest() - - request.resource = "resource_value" - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.test_iam_permissions), "__call__" - ) as call: - call.return_value = iam_policy_pb2.TestIamPermissionsResponse() - client.test_iam_permissions(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "resource=resource_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_test_iam_permissions_field_headers_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = iam_policy_pb2.TestIamPermissionsRequest() - - request.resource = "resource_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), "__call__" - ) as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - iam_policy_pb2.TestIamPermissionsResponse() - ) - await client.test_iam_permissions(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "resource=resource_value", - ) in kw["metadata"] - - -def test_test_iam_permissions_from_dict_foreign(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = iam_policy_pb2.TestIamPermissionsResponse() - response = client.test_iam_permissions( - request={ - "resource": "resource_value", - "permissions": ["permissions_value"], - } - ) - call.assert_called() - - -def test_test_iam_permissions_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = iam_policy_pb2.TestIamPermissionsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.test_iam_permissions( - resource="resource_value", - permissions=["permissions_value"], - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].resource - mock_val = "resource_value" - assert arg == mock_val - arg = args[0].permissions - mock_val = ["permissions_value"] - assert arg == mock_val - - -def test_test_iam_permissions_flattened_error(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.test_iam_permissions( - iam_policy_pb2.TestIamPermissionsRequest(), - resource="resource_value", - permissions=["permissions_value"], - ) - - -@pytest.mark.asyncio -async def test_test_iam_permissions_flattened_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = iam_policy_pb2.TestIamPermissionsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - iam_policy_pb2.TestIamPermissionsResponse() - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.test_iam_permissions( - resource="resource_value", - permissions=["permissions_value"], - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].resource - mock_val = "resource_value" - assert arg == mock_val - arg = args[0].permissions - mock_val = ["permissions_value"] - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_test_iam_permissions_flattened_error_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.test_iam_permissions( - iam_policy_pb2.TestIamPermissionsRequest(), - resource="resource_value", - permissions=["permissions_value"], - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.CreateSchemaBundleRequest, - dict, - ], -) -def test__create_schema_bundle(request_type, transport: str = "grpc"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_schema_bundle), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/spam") - response = client._create_schema_bundle(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.CreateSchemaBundleRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test__create_schema_bundle_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. 
- request = bigtable_table_admin.CreateSchemaBundleRequest( - parent="parent_value", - schema_bundle_id="schema_bundle_id_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_schema_bundle), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._create_schema_bundle(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CreateSchemaBundleRequest( - parent="parent_value", - schema_bundle_id="schema_bundle_id_value", - ) - - -def test__create_schema_bundle_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.create_schema_bundle in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.create_schema_bundle - ] = mock_rpc - request = {} - client._create_schema_bundle(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods call wrapper_fn to build a cached - # client._transport.operations_client instance on first rpc call. - # Subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client._create_schema_bundle(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test__create_schema_bundle_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.create_schema_bundle - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.create_schema_bundle - ] = mock_rpc - - request = {} - await client._create_schema_bundle(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods call wrapper_fn to build a cached - # client._transport.operations_client instance on first rpc call. 
- # Subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - await client._create_schema_bundle(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test__create_schema_bundle_async( - transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.CreateSchemaBundleRequest, -): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_schema_bundle), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - response = await client._create_schema_bundle(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.CreateSchemaBundleRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test__create_schema_bundle_async_from_dict(): - await test__create_schema_bundle_async(request_type=dict) - - -def test__create_schema_bundle_field_headers(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.CreateSchemaBundleRequest() - - request.parent = "parent_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_schema_bundle), "__call__" - ) as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client._create_schema_bundle(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test__create_schema_bundle_field_headers_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.CreateSchemaBundleRequest() - - request.parent = "parent_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_schema_bundle), "__call__" - ) as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/op") - ) - await client._create_schema_bundle(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] - - -def test__create_schema_bundle_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_schema_bundle), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/op") - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client._create_schema_bundle( - parent="parent_value", - schema_bundle_id="schema_bundle_id_value", - schema_bundle=table.SchemaBundle(name="name_value"), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - arg = args[0].schema_bundle_id - mock_val = "schema_bundle_id_value" - assert arg == mock_val - arg = args[0].schema_bundle - mock_val = table.SchemaBundle(name="name_value") - assert arg == mock_val - - -def test__create_schema_bundle_flattened_error(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client._create_schema_bundle( - bigtable_table_admin.CreateSchemaBundleRequest(), - parent="parent_value", - schema_bundle_id="schema_bundle_id_value", - schema_bundle=table.SchemaBundle(name="name_value"), - ) - - -@pytest.mark.asyncio -async def test__create_schema_bundle_flattened_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_schema_bundle), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/op") - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client._create_schema_bundle( - parent="parent_value", - schema_bundle_id="schema_bundle_id_value", - schema_bundle=table.SchemaBundle(name="name_value"), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - arg = args[0].schema_bundle_id - mock_val = "schema_bundle_id_value" - assert arg == mock_val - arg = args[0].schema_bundle - mock_val = table.SchemaBundle(name="name_value") - assert arg == mock_val - - -@pytest.mark.asyncio -async def test__create_schema_bundle_flattened_error_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client._create_schema_bundle( - bigtable_table_admin.CreateSchemaBundleRequest(), - parent="parent_value", - schema_bundle_id="schema_bundle_id_value", - schema_bundle=table.SchemaBundle(name="name_value"), - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.UpdateSchemaBundleRequest, - dict, - ], -) -def test__update_schema_bundle(request_type, transport: str = "grpc"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_schema_bundle), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/spam") - response = client._update_schema_bundle(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.UpdateSchemaBundleRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test__update_schema_bundle_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_table_admin.UpdateSchemaBundleRequest() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_schema_bundle), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._update_schema_bundle(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.UpdateSchemaBundleRequest() - - -def test__update_schema_bundle_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.update_schema_bundle in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.update_schema_bundle - ] = mock_rpc - request = {} - client._update_schema_bundle(request) - - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 - - # Operation methods call wrapper_fn to build a cached - # client._transport.operations_client instance on first rpc call. - # Subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client._update_schema_bundle(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test__update_schema_bundle_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.update_schema_bundle - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.update_schema_bundle - ] = mock_rpc - - request = {} - await client._update_schema_bundle(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods call wrapper_fn to build a cached - # client._transport.operations_client instance on first rpc call. - # Subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - await client._update_schema_bundle(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test__update_schema_bundle_async( - transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.UpdateSchemaBundleRequest, -): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_schema_bundle), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - response = await client._update_schema_bundle(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.UpdateSchemaBundleRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test__update_schema_bundle_async_from_dict(): - await test__update_schema_bundle_async(request_type=dict) - - -def test__update_schema_bundle_field_headers(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = bigtable_table_admin.UpdateSchemaBundleRequest() - - request.schema_bundle.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_schema_bundle), "__call__" - ) as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client._update_schema_bundle(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "schema_bundle.name=name_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test__update_schema_bundle_field_headers_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.UpdateSchemaBundleRequest() - - request.schema_bundle.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_schema_bundle), "__call__" - ) as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/op") - ) - await client._update_schema_bundle(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "schema_bundle.name=name_value", - ) in kw["metadata"] - - -def test__update_schema_bundle_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_schema_bundle), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/op") - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client._update_schema_bundle( - schema_bundle=table.SchemaBundle(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].schema_bundle - mock_val = table.SchemaBundle(name="name_value") - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) - assert arg == mock_val - - -def test__update_schema_bundle_flattened_error(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client._update_schema_bundle( - bigtable_table_admin.UpdateSchemaBundleRequest(), - schema_bundle=table.SchemaBundle(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - -@pytest.mark.asyncio -async def test__update_schema_bundle_flattened_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_schema_bundle), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/op") - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client._update_schema_bundle( - schema_bundle=table.SchemaBundle(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].schema_bundle - mock_val = table.SchemaBundle(name="name_value") - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) - assert arg == mock_val - - -@pytest.mark.asyncio -async def test__update_schema_bundle_flattened_error_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client._update_schema_bundle( - bigtable_table_admin.UpdateSchemaBundleRequest(), - schema_bundle=table.SchemaBundle(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.GetSchemaBundleRequest, - dict, - ], -) -def test__get_schema_bundle(request_type, transport: str = "grpc"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_schema_bundle), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = table.SchemaBundle( - name="name_value", - etag="etag_value", - ) - response = client._get_schema_bundle(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.GetSchemaBundleRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, table.SchemaBundle) - assert response.name == "name_value" - assert response.etag == "etag_value" - - -def test__get_schema_bundle_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. 
- client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_table_admin.GetSchemaBundleRequest( - name="name_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_schema_bundle), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._get_schema_bundle(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetSchemaBundleRequest( - name="name_value", - ) - - -def test__get_schema_bundle_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.get_schema_bundle in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.get_schema_bundle - ] = mock_rpc - request = {} - client._get_schema_bundle(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client._get_schema_bundle(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test__get_schema_bundle_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.get_schema_bundle - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.get_schema_bundle - ] = mock_rpc - - request = {} - await client._get_schema_bundle(request) - - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 - - await client._get_schema_bundle(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test__get_schema_bundle_async( - transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.GetSchemaBundleRequest, -): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_schema_bundle), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - table.SchemaBundle( - name="name_value", - etag="etag_value", - ) - ) - response = await client._get_schema_bundle(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.GetSchemaBundleRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, table.SchemaBundle) - assert response.name == "name_value" - assert response.etag == "etag_value" - - -@pytest.mark.asyncio -async def test__get_schema_bundle_async_from_dict(): - await test__get_schema_bundle_async(request_type=dict) - - -def test__get_schema_bundle_field_headers(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.GetSchemaBundleRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_schema_bundle), "__call__" - ) as call: - call.return_value = table.SchemaBundle() - client._get_schema_bundle(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test__get_schema_bundle_field_headers_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.GetSchemaBundleRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_schema_bundle), "__call__" - ) as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.SchemaBundle()) - await client._get_schema_bundle(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -def test__get_schema_bundle_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_schema_bundle), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = table.SchemaBundle() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client._get_schema_bundle( - name="name_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - - -def test__get_schema_bundle_flattened_error(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client._get_schema_bundle( - bigtable_table_admin.GetSchemaBundleRequest(), - name="name_value", - ) - - -@pytest.mark.asyncio -async def test__get_schema_bundle_flattened_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_schema_bundle), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = table.SchemaBundle() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.SchemaBundle()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client._get_schema_bundle( - name="name_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - - -@pytest.mark.asyncio -async def test__get_schema_bundle_flattened_error_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client._get_schema_bundle( - bigtable_table_admin.GetSchemaBundleRequest(), - name="name_value", - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.ListSchemaBundlesRequest, - dict, - ], -) -def test__list_schema_bundles(request_type, transport: str = "grpc"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_schema_bundles), "__call__" - ) as call: - # Designate an appropriate return value for the call. 
- call.return_value = bigtable_table_admin.ListSchemaBundlesResponse( - next_page_token="next_page_token_value", - ) - response = client._list_schema_bundles(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.ListSchemaBundlesRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListSchemaBundlesPager) - assert response.next_page_token == "next_page_token_value" - - -def test__list_schema_bundles_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_table_admin.ListSchemaBundlesRequest( - parent="parent_value", - page_token="page_token_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_schema_bundles), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._list_schema_bundles(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListSchemaBundlesRequest( - parent="parent_value", - page_token="page_token_value", - ) - - -def test__list_schema_bundles_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.list_schema_bundles in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.list_schema_bundles - ] = mock_rpc - request = {} - client._list_schema_bundles(request) - - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 - - client._list_schema_bundles(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test__list_schema_bundles_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.list_schema_bundles - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.list_schema_bundles - ] = mock_rpc - - request = {} - await client._list_schema_bundles(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - await client._list_schema_bundles(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test__list_schema_bundles_async( - transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.ListSchemaBundlesRequest, -): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_schema_bundles), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.ListSchemaBundlesResponse( - next_page_token="next_page_token_value", - ) - ) - response = await client._list_schema_bundles(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.ListSchemaBundlesRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListSchemaBundlesAsyncPager) - assert response.next_page_token == "next_page_token_value" - - -@pytest.mark.asyncio -async def test__list_schema_bundles_async_from_dict(): - await test__list_schema_bundles_async(request_type=dict) - - -def test__list_schema_bundles_field_headers(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.ListSchemaBundlesRequest() - - request.parent = "parent_value" - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_schema_bundles), "__call__" - ) as call: - call.return_value = bigtable_table_admin.ListSchemaBundlesResponse() - client._list_schema_bundles(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test__list_schema_bundles_field_headers_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.ListSchemaBundlesRequest() - - request.parent = "parent_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_schema_bundles), "__call__" - ) as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.ListSchemaBundlesResponse() - ) - await client._list_schema_bundles(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] - - -def test__list_schema_bundles_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_schema_bundles), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.ListSchemaBundlesResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client._list_schema_bundles( - parent="parent_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - - -def test__list_schema_bundles_flattened_error(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client._list_schema_bundles( - bigtable_table_admin.ListSchemaBundlesRequest(), - parent="parent_value", - ) - - -@pytest.mark.asyncio -async def test__list_schema_bundles_flattened_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_schema_bundles), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.ListSchemaBundlesResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.ListSchemaBundlesResponse() - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- response = await client._list_schema_bundles( - parent="parent_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - - -@pytest.mark.asyncio -async def test__list_schema_bundles_flattened_error_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client._list_schema_bundles( - bigtable_table_admin.ListSchemaBundlesRequest(), - parent="parent_value", - ) - - -def test__list_schema_bundles_pager(transport_name: str = "grpc"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport_name, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_schema_bundles), "__call__" - ) as call: - # Set the response to a series of pages. - call.side_effect = ( - bigtable_table_admin.ListSchemaBundlesResponse( - schema_bundles=[ - table.SchemaBundle(), - table.SchemaBundle(), - table.SchemaBundle(), - ], - next_page_token="abc", - ), - bigtable_table_admin.ListSchemaBundlesResponse( - schema_bundles=[], - next_page_token="def", - ), - bigtable_table_admin.ListSchemaBundlesResponse( - schema_bundles=[ - table.SchemaBundle(), - ], - next_page_token="ghi", - ), - bigtable_table_admin.ListSchemaBundlesResponse( - schema_bundles=[ - table.SchemaBundle(), - table.SchemaBundle(), - ], - ), - RuntimeError, - ) - - expected_metadata = () - retry = retries.Retry() - timeout = 5 - expected_metadata = tuple(expected_metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), - ) - pager = client._list_schema_bundles(request={}, retry=retry, timeout=timeout) - - assert pager._metadata == expected_metadata - assert pager._retry == retry - assert pager._timeout == timeout - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, table.SchemaBundle) for i in results) - - -def test__list_schema_bundles_pages(transport_name: str = "grpc"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport_name, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_schema_bundles), "__call__" - ) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - bigtable_table_admin.ListSchemaBundlesResponse( - schema_bundles=[ - table.SchemaBundle(), - table.SchemaBundle(), - table.SchemaBundle(), - ], - next_page_token="abc", - ), - bigtable_table_admin.ListSchemaBundlesResponse( - schema_bundles=[], - next_page_token="def", - ), - bigtable_table_admin.ListSchemaBundlesResponse( - schema_bundles=[ - table.SchemaBundle(), - ], - next_page_token="ghi", - ), - bigtable_table_admin.ListSchemaBundlesResponse( - schema_bundles=[ - table.SchemaBundle(), - table.SchemaBundle(), - ], - ), - RuntimeError, - ) - pages = list(client._list_schema_bundles(request={}).pages) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token - - -@pytest.mark.asyncio -async def test__list_schema_bundles_async_pager(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_schema_bundles), - "__call__", - new_callable=mock.AsyncMock, - ) as call: - # Set the response to a series of pages. - call.side_effect = ( - bigtable_table_admin.ListSchemaBundlesResponse( - schema_bundles=[ - table.SchemaBundle(), - table.SchemaBundle(), - table.SchemaBundle(), - ], - next_page_token="abc", - ), - bigtable_table_admin.ListSchemaBundlesResponse( - schema_bundles=[], - next_page_token="def", - ), - bigtable_table_admin.ListSchemaBundlesResponse( - schema_bundles=[ - table.SchemaBundle(), - ], - next_page_token="ghi", - ), - bigtable_table_admin.ListSchemaBundlesResponse( - schema_bundles=[ - table.SchemaBundle(), - table.SchemaBundle(), - ], - ), - RuntimeError, - ) - async_pager = await client._list_schema_bundles( - request={}, - ) - assert async_pager.next_page_token == "abc" - responses = [] - async for response in async_pager: # pragma: no branch - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, table.SchemaBundle) for i in responses) - - -@pytest.mark.asyncio -async def test__list_schema_bundles_async_pages(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_schema_bundles), - "__call__", - new_callable=mock.AsyncMock, - ) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - bigtable_table_admin.ListSchemaBundlesResponse( - schema_bundles=[ - table.SchemaBundle(), - table.SchemaBundle(), - table.SchemaBundle(), - ], - next_page_token="abc", - ), - bigtable_table_admin.ListSchemaBundlesResponse( - schema_bundles=[], - next_page_token="def", - ), - bigtable_table_admin.ListSchemaBundlesResponse( - schema_bundles=[ - table.SchemaBundle(), - ], - next_page_token="ghi", - ), - bigtable_table_admin.ListSchemaBundlesResponse( - schema_bundles=[ - table.SchemaBundle(), - table.SchemaBundle(), - ], - ), - RuntimeError, - ) - pages = [] - # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` - # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 - async for page_ in ( # pragma: no branch - await client._list_schema_bundles(request={}) - ).pages: - pages.append(page_) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.DeleteSchemaBundleRequest, - dict, - ], -) -def test__delete_schema_bundle(request_type, transport: str = "grpc"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_schema_bundle), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client._delete_schema_bundle(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.DeleteSchemaBundleRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert response is None - - -def test__delete_schema_bundle_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Populate all string fields in the request which are not UUID4 - # since we want to check that UUID4 are populated automatically - # if they meet the requirements of AIP 4235. - request = bigtable_table_admin.DeleteSchemaBundleRequest( - name="name_value", - etag="etag_value", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_schema_bundle), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. 
- ) - client._delete_schema_bundle(request=request) - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteSchemaBundleRequest( - name="name_value", - etag="etag_value", - ) - - -def test__delete_schema_bundle_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.delete_schema_bundle in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.delete_schema_bundle - ] = mock_rpc - request = {} - client._delete_schema_bundle(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client._delete_schema_bundle(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test__delete_schema_bundle_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._client._transport.delete_schema_bundle - in client._client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.AsyncMock() - mock_rpc.return_value = mock.Mock() - client._client._transport._wrapped_methods[ - client._client._transport.delete_schema_bundle - ] = mock_rpc - - request = {} - await client._delete_schema_bundle(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - await client._delete_schema_bundle(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.asyncio -async def test__delete_schema_bundle_async( - transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.DeleteSchemaBundleRequest, -): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_schema_bundle), "__call__" - ) as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client._delete_schema_bundle(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - request = bigtable_table_admin.DeleteSchemaBundleRequest() - assert args[0] == request - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test__delete_schema_bundle_async_from_dict(): - await test__delete_schema_bundle_async(request_type=dict) - - -def test__delete_schema_bundle_field_headers(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.DeleteSchemaBundleRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_schema_bundle), "__call__" - ) as call: - call.return_value = None - client._delete_schema_bundle(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test__delete_schema_bundle_field_headers_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.DeleteSchemaBundleRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_schema_bundle), "__call__" - ) as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client._delete_schema_bundle(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -def test__delete_schema_bundle_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_schema_bundle), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client._delete_schema_bundle( - name="name_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - - -def test__delete_schema_bundle_flattened_error(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client._delete_schema_bundle( - bigtable_table_admin.DeleteSchemaBundleRequest(), - name="name_value", - ) - - -@pytest.mark.asyncio -async def test__delete_schema_bundle_flattened_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_schema_bundle), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = None - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client._delete_schema_bundle( - name="name_value", - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - - -@pytest.mark.asyncio -async def test__delete_schema_bundle_flattened_error_async(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client._delete_schema_bundle( - bigtable_table_admin.DeleteSchemaBundleRequest(), - name="name_value", - ) - - -def test_create_table_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.create_table in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.create_table] = mock_rpc - - request = {} - client.create_table(request) - - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 - - client.create_table(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_create_table_rest_required_fields( - request_type=bigtable_table_admin.CreateTableRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request_init["table_id"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_table._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = "parent_value" - jsonified_request["tableId"] = "table_id_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_table._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - assert "tableId" in jsonified_request - assert jsonified_request["tableId"] == "table_id_value" - - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = gba_table.Table() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = gba_table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.create_table(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_create_table_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.create_table._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "parent", - "tableId", - "table", - ) - ) - ) - - -def test_create_table_rest_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = gba_table.Table() - - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/instances/sample2"} - - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - table_id="table_id_value", - table=gba_table.Table(name="name_value"), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = gba_table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.create_table(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*}/tables" % client.transport._host, - args[1], - ) - - -def test_create_table_rest_flattened_error(transport: str = "rest"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.create_table( - bigtable_table_admin.CreateTableRequest(), - parent="parent_value", - table_id="table_id_value", - table=gba_table.Table(name="name_value"), - ) - - -def test_create_table_from_snapshot_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.create_table_from_snapshot - in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.create_table_from_snapshot - ] = mock_rpc - - request = {} - client.create_table_from_snapshot(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.create_table_from_snapshot(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_create_table_from_snapshot_rest_required_fields( - request_type=bigtable_table_admin.CreateTableFromSnapshotRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request_init["table_id"] = "" - request_init["source_snapshot"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_table_from_snapshot._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = "parent_value" - jsonified_request["tableId"] = "table_id_value" - jsonified_request["sourceSnapshot"] = "source_snapshot_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_table_from_snapshot._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - assert "tableId" in jsonified_request - assert jsonified_request["tableId"] == "table_id_value" - assert "sourceSnapshot" in jsonified_request - assert jsonified_request["sourceSnapshot"] == "source_snapshot_value" - - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. 
- return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.create_table_from_snapshot(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_create_table_from_snapshot_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.create_table_from_snapshot._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "parent", - "tableId", - "sourceSnapshot", - ) - ) - ) - - -def test_create_table_from_snapshot_rest_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/instances/sample2"} - - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - table_id="table_id_value", - source_snapshot="source_snapshot_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.create_table_from_snapshot(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot" - % client.transport._host, - args[1], - ) - - -def test_create_table_from_snapshot_rest_flattened_error(transport: str = "rest"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.create_table_from_snapshot( - bigtable_table_admin.CreateTableFromSnapshotRequest(), - parent="parent_value", - table_id="table_id_value", - source_snapshot="source_snapshot_value", - ) - - -def test_list_tables_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.list_tables in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.list_tables] = mock_rpc - - request = {} - client.list_tables(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.list_tables(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_list_tables_rest_required_fields( - request_type=bigtable_table_admin.ListTablesRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_tables._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = "parent_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_tables._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set( - ( - "page_size", - "page_token", - "view", - ) - ) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListTablesResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_table_admin.ListTablesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.list_tables(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_list_tables_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.list_tables._get_unset_required_fields({}) - assert set(unset_fields) == ( - set( - ( - "pageSize", - "pageToken", - "view", - ) - ) - & set(("parent",)) - ) - - -def test_list_tables_rest_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListTablesResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/instances/sample2"} - - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_table_admin.ListTablesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.list_tables(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*}/tables" % client.transport._host, - args[1], - ) - - -def test_list_tables_rest_flattened_error(transport: str = "rest"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_tables( - bigtable_table_admin.ListTablesRequest(), - parent="parent_value", - ) - - -def test_list_tables_rest_pager(transport: str = "rest"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. 
- # with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - bigtable_table_admin.ListTablesResponse( - tables=[ - table.Table(), - table.Table(), - table.Table(), - ], - next_page_token="abc", - ), - bigtable_table_admin.ListTablesResponse( - tables=[], - next_page_token="def", - ), - bigtable_table_admin.ListTablesResponse( - tables=[ - table.Table(), - ], - next_page_token="ghi", - ), - bigtable_table_admin.ListTablesResponse( - tables=[ - table.Table(), - table.Table(), - ], - ), - ) - # Two responses for two calls - response = response + response - - # Wrap the values into proper Response objs - response = tuple( - bigtable_table_admin.ListTablesResponse.to_json(x) for x in response - ) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode("UTF-8") - return_val.status_code = 200 - req.side_effect = return_values - - sample_request = {"parent": "projects/sample1/instances/sample2"} - - pager = client.list_tables(request=sample_request) - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, table.Table) for i in results) - - pages = list(client.list_tables(request=sample_request).pages) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token - - -def test_get_table_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.get_table in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.get_table] = mock_rpc - - request = {} - client.get_table(request) - - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 - - client.get_table(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_get_table_rest_required_fields( - request_type=bigtable_table_admin.GetTableRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_table._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = "name_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_table._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("view",)) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" - - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = table.Table() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.get_table(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_get_table_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.get_table._get_unset_required_fields({}) - assert set(unset_fields) == (set(("view",)) & set(("name",))) - - -def test_get_table_rest_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = table.Table() - - # get arguments that satisfy an http rule for this method - sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} - - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.get_table(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/tables/*}" % client.transport._host, - args[1], - ) - - -def test_get_table_rest_flattened_error(transport: str = "rest"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_table( - bigtable_table_admin.GetTableRequest(), - name="name_value", - ) - - -def test_update_table_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.update_table in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.update_table] = mock_rpc - - request = {} - client.update_table(request) - - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 - - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.update_table(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_update_table_rest_required_fields( - request_type=bigtable_table_admin.UpdateTableRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).update_table._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).update_table._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set( - ( - "ignore_warnings", - "update_mask", - ) - ) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "patch", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.update_table(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_update_table_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.update_table._get_unset_required_fields({}) - assert set(unset_fields) == ( - set( - ( - "ignoreWarnings", - "updateMask", - ) - ) - & set( - ( - "table", - "updateMask", - ) - ) - ) - - -def test_update_table_rest_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # get arguments that satisfy an http rule for this method - sample_request = { - "table": {"name": "projects/sample1/instances/sample2/tables/sample3"} - } - - # get truthy value for each flattened field - mock_args = dict( - table=gba_table.Table(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.update_table(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{table.name=projects/*/instances/*/tables/*}" - % client.transport._host, - args[1], - ) - - -def test_update_table_rest_flattened_error(transport: str = "rest"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.update_table( - bigtable_table_admin.UpdateTableRequest(), - table=gba_table.Table(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - -def test_delete_table_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.delete_table in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.delete_table] = mock_rpc - - request = {} - client.delete_table(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.delete_table(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_delete_table_rest_required_fields( - request_type=bigtable_table_admin.DeleteTableRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).delete_table._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = "name_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).delete_table._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" - - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = None - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "delete", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = "" - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.delete_table(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_delete_table_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.delete_table._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) - - -def test_delete_table_rest_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = None - - # get arguments that satisfy an http rule for this method - sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} - - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = "" - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.delete_table(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/tables/*}" % client.transport._host, - args[1], - ) - - -def test_delete_table_rest_flattened_error(transport: str = "rest"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_table( - bigtable_table_admin.DeleteTableRequest(), - name="name_value", - ) - - -def test_undelete_table_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.undelete_table in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. 
- ) - client._transport._wrapped_methods[client._transport.undelete_table] = mock_rpc - - request = {} - client.undelete_table(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.undelete_table(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_undelete_table_rest_required_fields( - request_type=bigtable_table_admin.UndeleteTableRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).undelete_table._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = "name_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).undelete_table._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" - - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.undelete_table(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_undelete_table_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.undelete_table._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) - - -def test_undelete_table_rest_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # get arguments that satisfy an http rule for this method - sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} - - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.undelete_table(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/tables/*}:undelete" - % client.transport._host, - args[1], - ) - - -def test_undelete_table_rest_flattened_error(transport: str = "rest"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.undelete_table( - bigtable_table_admin.UndeleteTableRequest(), - name="name_value", - ) - - -def test_create_authorized_view_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.create_authorized_view - in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.create_authorized_view - ] = mock_rpc - - request = {} - client.create_authorized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.create_authorized_view(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_create_authorized_view_rest_required_fields( - request_type=bigtable_table_admin.CreateAuthorizedViewRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request_init["authorized_view_id"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - assert "authorizedViewId" not in jsonified_request - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_authorized_view._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - assert "authorizedViewId" in jsonified_request - assert jsonified_request["authorizedViewId"] == request_init["authorized_view_id"] - - jsonified_request["parent"] = "parent_value" - jsonified_request["authorizedViewId"] = "authorized_view_id_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_authorized_view._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("authorized_view_id",)) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - assert "authorizedViewId" in jsonified_request - assert jsonified_request["authorizedViewId"] == "authorized_view_id_value" - - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. 
- return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.create_authorized_view(request) - - expected_params = [ - ( - "authorizedViewId", - "", - ), - ("$alt", "json;enum-encoding=int"), - ] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_create_authorized_view_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.create_authorized_view._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(("authorizedViewId",)) - & set( - ( - "parent", - "authorizedViewId", - "authorizedView", - ) - ) - ) - - -def test_create_authorized_view_rest_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/instances/sample2/tables/sample3"} - - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - authorized_view=table.AuthorizedView(name="name_value"), - authorized_view_id="authorized_view_id_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.create_authorized_view(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*/tables/*}/authorizedViews" - % client.transport._host, - args[1], - ) - - -def test_create_authorized_view_rest_flattened_error(transport: str = "rest"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_authorized_view( - bigtable_table_admin.CreateAuthorizedViewRequest(), - parent="parent_value", - authorized_view=table.AuthorizedView(name="name_value"), - authorized_view_id="authorized_view_id_value", - ) - - -def test_list_authorized_views_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.list_authorized_views - in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.list_authorized_views - ] = mock_rpc - - request = {} - client.list_authorized_views(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.list_authorized_views(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_list_authorized_views_rest_required_fields( - request_type=bigtable_table_admin.ListAuthorizedViewsRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_authorized_views._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = "parent_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_authorized_views._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. 
- assert not set(unset_fields) - set( - ( - "page_size", - "page_token", - "view", - ) - ) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListAuthorizedViewsResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_table_admin.ListAuthorizedViewsResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.list_authorized_views(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_list_authorized_views_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.list_authorized_views._get_unset_required_fields({}) - assert set(unset_fields) == ( - set( - ( - "pageSize", - "pageToken", - "view", - ) - ) - & set(("parent",)) - ) - - -def test_list_authorized_views_rest_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. 
- return_value = bigtable_table_admin.ListAuthorizedViewsResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/instances/sample2/tables/sample3"} - - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_table_admin.ListAuthorizedViewsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.list_authorized_views(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*/tables/*}/authorizedViews" - % client.transport._host, - args[1], - ) - - -def test_list_authorized_views_rest_flattened_error(transport: str = "rest"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_authorized_views( - bigtable_table_admin.ListAuthorizedViewsRequest(), - parent="parent_value", - ) - - -def test_list_authorized_views_rest_pager(transport: str = "rest"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. 
- # with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - bigtable_table_admin.ListAuthorizedViewsResponse( - authorized_views=[ - table.AuthorizedView(), - table.AuthorizedView(), - table.AuthorizedView(), - ], - next_page_token="abc", - ), - bigtable_table_admin.ListAuthorizedViewsResponse( - authorized_views=[], - next_page_token="def", - ), - bigtable_table_admin.ListAuthorizedViewsResponse( - authorized_views=[ - table.AuthorizedView(), - ], - next_page_token="ghi", - ), - bigtable_table_admin.ListAuthorizedViewsResponse( - authorized_views=[ - table.AuthorizedView(), - table.AuthorizedView(), - ], - ), - ) - # Two responses for two calls - response = response + response - - # Wrap the values into proper Response objs - response = tuple( - bigtable_table_admin.ListAuthorizedViewsResponse.to_json(x) - for x in response - ) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode("UTF-8") - return_val.status_code = 200 - req.side_effect = return_values - - sample_request = {"parent": "projects/sample1/instances/sample2/tables/sample3"} - - pager = client.list_authorized_views(request=sample_request) - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, table.AuthorizedView) for i in results) - - pages = list(client.list_authorized_views(request=sample_request).pages) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token - - -def test_get_authorized_view_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.get_authorized_view in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.get_authorized_view - ] = mock_rpc - - request = {} - client.get_authorized_view(request) - - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 - - client.get_authorized_view(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_get_authorized_view_rest_required_fields( - request_type=bigtable_table_admin.GetAuthorizedViewRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_authorized_view._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = "name_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_authorized_view._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("view",)) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" - - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = table.AuthorizedView() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = table.AuthorizedView.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.get_authorized_view(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_get_authorized_view_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.get_authorized_view._get_unset_required_fields({}) - assert set(unset_fields) == (set(("view",)) & set(("name",))) - - -def test_get_authorized_view_rest_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = table.AuthorizedView() - - # get arguments that satisfy an http rule for this method - sample_request = { - "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" - } - - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = table.AuthorizedView.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.get_authorized_view(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/tables/*/authorizedViews/*}" - % client.transport._host, - args[1], - ) - - -def test_get_authorized_view_rest_flattened_error(transport: str = "rest"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.get_authorized_view( - bigtable_table_admin.GetAuthorizedViewRequest(), - name="name_value", - ) - - -def test_update_authorized_view_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.update_authorized_view - in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.update_authorized_view - ] = mock_rpc - - request = {} - client.update_authorized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.update_authorized_view(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_update_authorized_view_rest_required_fields( - request_type=bigtable_table_admin.UpdateAuthorizedViewRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).update_authorized_view._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).update_authorized_view._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set( - ( - "ignore_warnings", - "update_mask", - ) - ) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "patch", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.update_authorized_view(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_update_authorized_view_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.update_authorized_view._get_unset_required_fields({}) - assert set(unset_fields) == ( - set( - ( - "ignoreWarnings", - "updateMask", - ) - ) - & set(("authorizedView",)) - ) - - -def test_update_authorized_view_rest_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # get arguments that satisfy an http rule for this method - sample_request = { - "authorized_view": { - "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" - } - } - - # get truthy value for each flattened field - mock_args = dict( - authorized_view=table.AuthorizedView(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.update_authorized_view(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{authorized_view.name=projects/*/instances/*/tables/*/authorizedViews/*}" - % client.transport._host, - args[1], - ) - - -def test_update_authorized_view_rest_flattened_error(transport: str = "rest"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.update_authorized_view( - bigtable_table_admin.UpdateAuthorizedViewRequest(), - authorized_view=table.AuthorizedView(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - -def test_delete_authorized_view_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.delete_authorized_view - in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.delete_authorized_view - ] = mock_rpc - - request = {} - client.delete_authorized_view(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.delete_authorized_view(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_delete_authorized_view_rest_required_fields( - request_type=bigtable_table_admin.DeleteAuthorizedViewRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).delete_authorized_view._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = "name_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).delete_authorized_view._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("etag",)) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" - - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = None - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "delete", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = "" - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.delete_authorized_view(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_delete_authorized_view_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.delete_authorized_view._get_unset_required_fields({}) - assert set(unset_fields) == (set(("etag",)) & set(("name",))) - - -def test_delete_authorized_view_rest_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = None - - # get arguments that satisfy an http rule for this method - sample_request = { - "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" - } - - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = "" - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.delete_authorized_view(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/tables/*/authorizedViews/*}" - % client.transport._host, - args[1], - ) - - -def test_delete_authorized_view_rest_flattened_error(transport: str = "rest"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.delete_authorized_view( - bigtable_table_admin.DeleteAuthorizedViewRequest(), - name="name_value", - ) - - -def test__modify_column_families_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.modify_column_families - in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.modify_column_families - ] = mock_rpc - - request = {} - client._modify_column_families(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client._modify_column_families(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test__modify_column_families_rest_required_fields( - request_type=bigtable_table_admin.ModifyColumnFamiliesRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).modify_column_families._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = "name_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).modify_column_families._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" - - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = table.Table() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client._modify_column_families(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test__modify_column_families_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.modify_column_families._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "name", - "modifications", - ) - ) - ) - - -def test__modify_column_families_rest_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = table.Table() - - # get arguments that satisfy an http rule for this method - sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} - - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - modifications=[ - bigtable_table_admin.ModifyColumnFamiliesRequest.Modification( - id="id_value" - ) - ], - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client._modify_column_families(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies" - % client.transport._host, - args[1], - ) - - -def test__modify_column_families_rest_flattened_error(transport: str = "rest"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client._modify_column_families( - bigtable_table_admin.ModifyColumnFamiliesRequest(), - name="name_value", - modifications=[ - bigtable_table_admin.ModifyColumnFamiliesRequest.Modification( - id="id_value" - ) - ], - ) - - -def test_drop_row_range_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.drop_row_range in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.drop_row_range] = mock_rpc - - request = {} - client.drop_row_range(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.drop_row_range(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_drop_row_range_rest_required_fields( - request_type=bigtable_table_admin.DropRowRangeRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).drop_row_range._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = "name_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).drop_row_range._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" - - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = None - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = "" - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.drop_row_range(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_drop_row_range_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.drop_row_range._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) - - -def test_generate_consistency_token_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.generate_consistency_token - in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.generate_consistency_token - ] = mock_rpc - - request = {} - client.generate_consistency_token(request) - - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 - - client.generate_consistency_token(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_generate_consistency_token_rest_required_fields( - request_type=bigtable_table_admin.GenerateConsistencyTokenRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).generate_consistency_token._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = "name_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).generate_consistency_token._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" - - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.generate_consistency_token(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_generate_consistency_token_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.generate_consistency_token._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) - - -def test_generate_consistency_token_rest_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} - - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.generate_consistency_token(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken" - % client.transport._host, - args[1], - ) - - -def test_generate_consistency_token_rest_flattened_error(transport: str = "rest"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.generate_consistency_token( - bigtable_table_admin.GenerateConsistencyTokenRequest(), - name="name_value", - ) - - -def test_check_consistency_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.check_consistency in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.check_consistency - ] = mock_rpc - - request = {} - client.check_consistency(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.check_consistency(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_check_consistency_rest_required_fields( - request_type=bigtable_table_admin.CheckConsistencyRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["name"] = "" - request_init["consistency_token"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).check_consistency._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = "name_value" - jsonified_request["consistencyToken"] = "consistency_token_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).check_consistency._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" - assert "consistencyToken" in jsonified_request - assert jsonified_request["consistencyToken"] == "consistency_token_value" - - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.CheckConsistencyResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_table_admin.CheckConsistencyResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.check_consistency(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_check_consistency_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.check_consistency._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "name", - "consistencyToken", - ) - ) - ) - - -def test_check_consistency_rest_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.CheckConsistencyResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} - - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - consistency_token="consistency_token_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_table_admin.CheckConsistencyResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.check_consistency(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/tables/*}:checkConsistency" - % client.transport._host, - args[1], - ) - - -def test_check_consistency_rest_flattened_error(transport: str = "rest"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.check_consistency( - bigtable_table_admin.CheckConsistencyRequest(), - name="name_value", - consistency_token="consistency_token_value", - ) - - -def test__snapshot_table_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.snapshot_table in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.snapshot_table] = mock_rpc - - request = {} - client._snapshot_table(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client._snapshot_table(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test__snapshot_table_rest_required_fields( - request_type=bigtable_table_admin.SnapshotTableRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["name"] = "" - request_init["cluster"] = "" - request_init["snapshot_id"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).snapshot_table._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = "name_value" - jsonified_request["cluster"] = "cluster_value" - jsonified_request["snapshotId"] = "snapshot_id_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).snapshot_table._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" - assert "cluster" in jsonified_request - assert jsonified_request["cluster"] == "cluster_value" - assert "snapshotId" in jsonified_request - assert jsonified_request["snapshotId"] == "snapshot_id_value" - - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client._snapshot_table(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test__snapshot_table_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.snapshot_table._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "name", - "cluster", - "snapshotId", - ) - ) - ) - - -def test__snapshot_table_rest_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # get arguments that satisfy an http rule for this method - sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} - - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - cluster="cluster_value", - snapshot_id="snapshot_id_value", - description="description_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client._snapshot_table(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/tables/*}:snapshot" - % client.transport._host, - args[1], - ) - - -def test__snapshot_table_rest_flattened_error(transport: str = "rest"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client._snapshot_table( - bigtable_table_admin.SnapshotTableRequest(), - name="name_value", - cluster="cluster_value", - snapshot_id="snapshot_id_value", - description="description_value", - ) - - -def test__get_snapshot_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.get_snapshot in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.get_snapshot] = mock_rpc - - request = {} - client._get_snapshot(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client._get_snapshot(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test__get_snapshot_rest_required_fields( - request_type=bigtable_table_admin.GetSnapshotRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_snapshot._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = "name_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_snapshot._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" - - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = table.Snapshot() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = table.Snapshot.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client._get_snapshot(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test__get_snapshot_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.get_snapshot._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) - - -def test__get_snapshot_rest_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = table.Snapshot() - - # get arguments that satisfy an http rule for this method - sample_request = { - "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" - } - - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = table.Snapshot.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client._get_snapshot(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}" - % client.transport._host, - args[1], - ) - - -def test__get_snapshot_rest_flattened_error(transport: str = "rest"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client._get_snapshot( - bigtable_table_admin.GetSnapshotRequest(), - name="name_value", - ) - - -def test__list_snapshots_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.list_snapshots in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.list_snapshots] = mock_rpc - - request = {} - client._list_snapshots(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client._list_snapshots(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test__list_snapshots_rest_required_fields( - request_type=bigtable_table_admin.ListSnapshotsRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_snapshots._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = "parent_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_snapshots._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set( - ( - "page_size", - "page_token", - ) - ) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListSnapshotsResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client._list_snapshots(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test__list_snapshots_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.list_snapshots._get_unset_required_fields({}) - assert set(unset_fields) == ( - set( - ( - "pageSize", - "pageToken", - ) - ) - & set(("parent",)) - ) - - -def test__list_snapshots_rest_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListSnapshotsResponse() - - # get arguments that satisfy an http rule for this method - sample_request = { - "parent": "projects/sample1/instances/sample2/clusters/sample3" - } - - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client._list_snapshots(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*/clusters/*}/snapshots" - % client.transport._host, - args[1], - ) - - -def test__list_snapshots_rest_flattened_error(transport: str = "rest"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client._list_snapshots( - bigtable_table_admin.ListSnapshotsRequest(), - parent="parent_value", - ) - - -def test__list_snapshots_rest_pager(transport: str = "rest"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. 
- # with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - table.Snapshot(), - table.Snapshot(), - ], - next_page_token="abc", - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[], - next_page_token="def", - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - ], - next_page_token="ghi", - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - table.Snapshot(), - ], - ), - ) - # Two responses for two calls - response = response + response - - # Wrap the values into proper Response objs - response = tuple( - bigtable_table_admin.ListSnapshotsResponse.to_json(x) for x in response - ) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode("UTF-8") - return_val.status_code = 200 - req.side_effect = return_values - - sample_request = { - "parent": "projects/sample1/instances/sample2/clusters/sample3" - } - - pager = client._list_snapshots(request=sample_request) - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, table.Snapshot) for i in results) - - pages = list(client._list_snapshots(request=sample_request).pages) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token - - -def test__delete_snapshot_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.delete_snapshot in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.delete_snapshot] = mock_rpc - - request = {} - client._delete_snapshot(request) - - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 - - client._delete_snapshot(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test__delete_snapshot_rest_required_fields( - request_type=bigtable_table_admin.DeleteSnapshotRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).delete_snapshot._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = "name_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).delete_snapshot._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" - - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = None - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "delete", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = "" - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client._delete_snapshot(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test__delete_snapshot_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.delete_snapshot._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) - - -def test__delete_snapshot_rest_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. 
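- # DeleteSnapshot returns google.protobuf.Empty, so the fake REST response
- # below carries an empty JSON payload.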
- return_value = None - - # get arguments that satisfy an http rule for this method - sample_request = { - "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" - } - - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = "" - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client._delete_snapshot(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}" - % client.transport._host, - args[1], - ) - - -def test__delete_snapshot_rest_flattened_error(transport: str = "rest"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client._delete_snapshot( - bigtable_table_admin.DeleteSnapshotRequest(), - name="name_value", - ) - - -def test_create_backup_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.create_backup in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.create_backup] = mock_rpc - - request = {} - client.create_backup(request) - - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 - - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.create_backup(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_create_backup_rest_required_fields( - request_type=bigtable_table_admin.CreateBackupRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request_init["backup_id"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - assert "backupId" not in jsonified_request - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_backup._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - assert "backupId" in jsonified_request - assert jsonified_request["backupId"] == request_init["backup_id"] - - jsonified_request["parent"] = "parent_value" - jsonified_request["backupId"] = "backup_id_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_backup._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("backup_id",)) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - assert "backupId" in jsonified_request - assert jsonified_request["backupId"] == "backup_id_value" - - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
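- # CreateBackup is transcoded as a POST whose body carries the request, so
- # only the system parameters (plus the empty backupId) appear as query params.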
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.create_backup(request) - - expected_params = [ - ( - "backupId", - "", - ), - ("$alt", "json;enum-encoding=int"), - ] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_create_backup_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.create_backup._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(("backupId",)) - & set( - ( - "parent", - "backupId", - "backup", - ) - ) - ) - - -def test_create_backup_rest_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # get arguments that satisfy an http rule for this method - sample_request = { - "parent": "projects/sample1/instances/sample2/clusters/sample3" - } - - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - backup_id="backup_id_value", - backup=table.Backup(name="name_value"), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.create_backup(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*/clusters/*}/backups" - % client.transport._host, - args[1], - ) - - -def test_create_backup_rest_flattened_error(transport: str = "rest"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.create_backup( - bigtable_table_admin.CreateBackupRequest(), - parent="parent_value", - backup_id="backup_id_value", - backup=table.Backup(name="name_value"), - ) - - -def test_get_backup_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.get_backup in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.get_backup] = mock_rpc - - request = {} - client.get_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.get_backup(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_get_backup_rest_required_fields( - request_type=bigtable_table_admin.GetBackupRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_backup._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = "name_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_backup._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" - - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = table.Backup() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
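- # GetBackup transcodes to a plain GET with no body, so the request fields
- # surface entirely through the mocked query_params.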
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = table.Backup.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.get_backup(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_get_backup_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.get_backup._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) - - -def test_get_backup_rest_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = table.Backup() - - # get arguments that satisfy an http rule for this method - sample_request = { - "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" - } - - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = table.Backup.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.get_backup(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/clusters/*/backups/*}" - % client.transport._host, - args[1], - ) - - -def test_get_backup_rest_flattened_error(transport: str = "rest"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.get_backup( - bigtable_table_admin.GetBackupRequest(), - name="name_value", - ) - - -def test_update_backup_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.update_backup in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.update_backup] = mock_rpc - - request = {} - client.update_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.update_backup(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_update_backup_rest_required_fields( - request_type=bigtable_table_admin.UpdateBackupRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).update_backup._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).update_backup._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("update_mask",)) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = table.Backup() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
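- # UpdateBackup transcodes to a PATCH that carries the Backup message in the
- # request body, with updateMask handled as a query-level field.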
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "patch", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = table.Backup.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.update_backup(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_update_backup_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.update_backup._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(("updateMask",)) - & set( - ( - "backup", - "updateMask", - ) - ) - ) - - -def test_update_backup_rest_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = table.Backup() - - # get arguments that satisfy an http rule for this method - sample_request = { - "backup": { - "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" - } - } - - # get truthy value for each flattened field - mock_args = dict( - backup=table.Backup(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = table.Backup.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.update_backup(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{backup.name=projects/*/instances/*/clusters/*/backups/*}" - % client.transport._host, - args[1], - ) - - -def test_update_backup_rest_flattened_error(transport: str = "rest"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.update_backup( - bigtable_table_admin.UpdateBackupRequest(), - backup=table.Backup(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - -def test_delete_backup_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.delete_backup in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.delete_backup] = mock_rpc - - request = {} - client.delete_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.delete_backup(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_delete_backup_rest_required_fields( - request_type=bigtable_table_admin.DeleteBackupRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).delete_backup._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = "name_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).delete_backup._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" - - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = None - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
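- # DeleteBackup maps to a bare DELETE; the mocked response body is an empty
- # JSON string because the RPC returns no payload.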
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "delete", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = "" - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.delete_backup(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_delete_backup_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.delete_backup._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) - - -def test_delete_backup_rest_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = None - - # get arguments that satisfy an http rule for this method - sample_request = { - "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" - } - - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = "" - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.delete_backup(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/clusters/*/backups/*}" - % client.transport._host, - args[1], - ) - - -def test_delete_backup_rest_flattened_error(transport: str = "rest"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_backup( - bigtable_table_admin.DeleteBackupRequest(), - name="name_value", - ) - - -def test_list_backups_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.list_backups in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. 
- ) - client._transport._wrapped_methods[client._transport.list_backups] = mock_rpc - - request = {} - client.list_backups(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.list_backups(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_list_backups_rest_required_fields( - request_type=bigtable_table_admin.ListBackupsRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_backups._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = "parent_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_backups._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set( - ( - "filter", - "order_by", - "page_size", - "page_token", - ) - ) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListBackupsResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
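- # ListBackups is a GET; filter, orderBy, pageSize and pageToken are the
- # optional query parameters exercised by the unset-fields checks above.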
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.list_backups(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_list_backups_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.list_backups._get_unset_required_fields({}) - assert set(unset_fields) == ( - set( - ( - "filter", - "orderBy", - "pageSize", - "pageToken", - ) - ) - & set(("parent",)) - ) - - -def test_list_backups_rest_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListBackupsResponse() - - # get arguments that satisfy an http rule for this method - sample_request = { - "parent": "projects/sample1/instances/sample2/clusters/sample3" - } - - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.list_backups(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*/clusters/*}/backups" - % client.transport._host, - args[1], - ) - - -def test_list_backups_rest_flattened_error(transport: str = "rest"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_backups( - bigtable_table_admin.ListBackupsRequest(), - parent="parent_value", - ) - - -def test_list_backups_rest_pager(transport: str = "rest"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. 
- # with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - table.Backup(), - table.Backup(), - ], - next_page_token="abc", - ), - bigtable_table_admin.ListBackupsResponse( - backups=[], - next_page_token="def", - ), - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - ], - next_page_token="ghi", - ), - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - table.Backup(), - ], - ), - ) - # Two responses for two calls - response = response + response - - # Wrap the values into proper Response objs - response = tuple( - bigtable_table_admin.ListBackupsResponse.to_json(x) for x in response - ) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode("UTF-8") - return_val.status_code = 200 - req.side_effect = return_values - - sample_request = { - "parent": "projects/sample1/instances/sample2/clusters/sample3" - } - - pager = client.list_backups(request=sample_request) - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, table.Backup) for i in results) - - pages = list(client.list_backups(request=sample_request).pages) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token - - -def test__restore_table_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.restore_table in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.restore_table] = mock_rpc - - request = {} - client._restore_table(request) - - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 - - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client._restore_table(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test__restore_table_rest_required_fields( - request_type=bigtable_table_admin.RestoreTableRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request_init["table_id"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).restore_table._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = "parent_value" - jsonified_request["tableId"] = "table_id_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).restore_table._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - assert "tableId" in jsonified_request - assert jsonified_request["tableId"] == "table_id_value" - - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
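- # RestoreTable transcodes to a POST with the request message duplicated into
- # the body, mirroring the other long-running-operation methods in this suite.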
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client._restore_table(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test__restore_table_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.restore_table._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "parent", - "tableId", - ) - ) - ) - - -def test_copy_backup_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.copy_backup in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.copy_backup] = mock_rpc - - request = {} - client.copy_backup(request) - - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 - - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.copy_backup(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_copy_backup_rest_required_fields( - request_type=bigtable_table_admin.CopyBackupRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request_init["backup_id"] = "" - request_init["source_backup"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).copy_backup._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = "parent_value" - jsonified_request["backupId"] = "backup_id_value" - jsonified_request["sourceBackup"] = "source_backup_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).copy_backup._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - assert "backupId" in jsonified_request - assert jsonified_request["backupId"] == "backup_id_value" - assert "sourceBackup" in jsonified_request - assert jsonified_request["sourceBackup"] == "source_backup_value" - - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
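- # CopyBackup also posts a body; parent, backupId and sourceBackup were set to
- # non-default values above, so transcoding must leave them untouched.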
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.copy_backup(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_copy_backup_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.copy_backup._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "parent", - "backupId", - "sourceBackup", - "expireTime", - ) - ) - ) - - -def test_copy_backup_rest_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # get arguments that satisfy an http rule for this method - sample_request = { - "parent": "projects/sample1/instances/sample2/clusters/sample3" - } - - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - backup_id="backup_id_value", - source_backup="source_backup_value", - expire_time=timestamp_pb2.Timestamp(seconds=751), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.copy_backup(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*/clusters/*}/backups:copy" - % client.transport._host, - args[1], - ) - - -def test_copy_backup_rest_flattened_error(transport: str = "rest"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.copy_backup( - bigtable_table_admin.CopyBackupRequest(), - parent="parent_value", - backup_id="backup_id_value", - source_backup="source_backup_value", - expire_time=timestamp_pb2.Timestamp(seconds=751), - ) - - -def test_get_iam_policy_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.get_iam_policy in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.get_iam_policy] = mock_rpc - - request = {} - client.get_iam_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.get_iam_policy(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_get_iam_policy_rest_required_fields( - request_type=iam_policy_pb2.GetIamPolicyRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["resource"] = "" - request = request_type(**request_init) - pb_request = request - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_iam_policy._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["resource"] = "resource_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_iam_policy._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "resource" in jsonified_request - assert jsonified_request["resource"] == "resource_value" - - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
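- # The IAM requests are plain iam_policy_pb2 protobuf messages, so no
- # request_type.pb() conversion is needed before transcoding.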
- pb_request = request - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.get_iam_policy(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_get_iam_policy_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.get_iam_policy._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("resource",))) - - -def test_get_iam_policy_rest_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy() - - # get arguments that satisfy an http rule for this method - sample_request = { - "resource": "projects/sample1/instances/sample2/tables/sample3" - } - - # get truthy value for each flattened field - mock_args = dict( - resource="resource_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.get_iam_policy(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy" - % client.transport._host, - args[1], - ) - - -def test_get_iam_policy_rest_flattened_error(transport: str = "rest"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.get_iam_policy( - iam_policy_pb2.GetIamPolicyRequest(), - resource="resource_value", - ) - - -def test_set_iam_policy_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.set_iam_policy in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.set_iam_policy] = mock_rpc - - request = {} - client.set_iam_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.set_iam_policy(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_set_iam_policy_rest_required_fields( - request_type=iam_policy_pb2.SetIamPolicyRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["resource"] = "" - request = request_type(**request_init) - pb_request = request - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).set_iam_policy._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["resource"] = "resource_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).set_iam_policy._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "resource" in jsonified_request - assert jsonified_request["resource"] == "resource_value" - - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
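- # SetIamPolicy follows the same shape as GetIamPolicy: a POST with a body and
- # only the "$alt" system parameter expected on the query string.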
- pb_request = request - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.set_iam_policy(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_set_iam_policy_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.set_iam_policy._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "resource", - "policy", - ) - ) - ) - - -def test_set_iam_policy_rest_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy() - - # get arguments that satisfy an http rule for this method - sample_request = { - "resource": "projects/sample1/instances/sample2/tables/sample3" - } - - # get truthy value for each flattened field - mock_args = dict( - resource="resource_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.set_iam_policy(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy" - % client.transport._host, - args[1], - ) - - -def test_set_iam_policy_rest_flattened_error(transport: str = "rest"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.set_iam_policy( - iam_policy_pb2.SetIamPolicyRequest(), - resource="resource_value", - ) - - -def test_test_iam_permissions_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.test_iam_permissions in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.test_iam_permissions - ] = mock_rpc - - request = {} - client.test_iam_permissions(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.test_iam_permissions(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_test_iam_permissions_rest_required_fields( - request_type=iam_policy_pb2.TestIamPermissionsRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["resource"] = "" - request_init["permissions"] = "" - request = request_type(**request_init) - pb_request = request - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).test_iam_permissions._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["resource"] = "resource_value" - jsonified_request["permissions"] = "permissions_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).test_iam_permissions._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "resource" in jsonified_request - assert jsonified_request["resource"] == "resource_value" - assert "permissions" in jsonified_request - assert jsonified_request["permissions"] == "permissions_value" - - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = iam_policy_pb2.TestIamPermissionsResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.test_iam_permissions(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_test_iam_permissions_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.test_iam_permissions._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "resource", - "permissions", - ) - ) - ) - - -def test_test_iam_permissions_rest_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = iam_policy_pb2.TestIamPermissionsResponse() - - # get arguments that satisfy an http rule for this method - sample_request = { - "resource": "projects/sample1/instances/sample2/tables/sample3" - } - - # get truthy value for each flattened field - mock_args = dict( - resource="resource_value", - permissions=["permissions_value"], - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.test_iam_permissions(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions" - % client.transport._host, - args[1], - ) - - -def test_test_iam_permissions_rest_flattened_error(transport: str = "rest"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.test_iam_permissions( - iam_policy_pb2.TestIamPermissionsRequest(), - resource="resource_value", - permissions=["permissions_value"], - ) - - -def test__create_schema_bundle_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.create_schema_bundle in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.create_schema_bundle - ] = mock_rpc - - request = {} - client._create_schema_bundle(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client._create_schema_bundle(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test__create_schema_bundle_rest_required_fields( - request_type=bigtable_table_admin.CreateSchemaBundleRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request_init["schema_bundle_id"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - assert "schemaBundleId" not in jsonified_request - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_schema_bundle._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - assert "schemaBundleId" in jsonified_request - assert jsonified_request["schemaBundleId"] == request_init["schema_bundle_id"] - - jsonified_request["parent"] = "parent_value" - jsonified_request["schemaBundleId"] = "schema_bundle_id_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_schema_bundle._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("schema_bundle_id",)) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - assert "schemaBundleId" in jsonified_request - assert jsonified_request["schemaBundleId"] == "schema_bundle_id_value" - - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. 
- return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client._create_schema_bundle(request) - - expected_params = [ - ( - "schemaBundleId", - "", - ), - ("$alt", "json;enum-encoding=int"), - ] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test__create_schema_bundle_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.create_schema_bundle._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(("schemaBundleId",)) - & set( - ( - "parent", - "schemaBundleId", - "schemaBundle", - ) - ) - ) - - -def test__create_schema_bundle_rest_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/instances/sample2/tables/sample3"} - - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - schema_bundle_id="schema_bundle_id_value", - schema_bundle=table.SchemaBundle(name="name_value"), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client._create_schema_bundle(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*/tables/*}/schemaBundles" - % client.transport._host, - args[1], - ) - - -def test__create_schema_bundle_rest_flattened_error(transport: str = "rest"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client._create_schema_bundle( - bigtable_table_admin.CreateSchemaBundleRequest(), - parent="parent_value", - schema_bundle_id="schema_bundle_id_value", - schema_bundle=table.SchemaBundle(name="name_value"), - ) - - -def test__update_schema_bundle_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.update_schema_bundle in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.update_schema_bundle - ] = mock_rpc - - request = {} - client._update_schema_bundle(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client._update_schema_bundle(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test__update_schema_bundle_rest_required_fields( - request_type=bigtable_table_admin.UpdateSchemaBundleRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).update_schema_bundle._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).update_schema_bundle._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set( - ( - "ignore_warnings", - "update_mask", - ) - ) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "patch", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client._update_schema_bundle(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test__update_schema_bundle_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.update_schema_bundle._get_unset_required_fields({}) - assert set(unset_fields) == ( - set( - ( - "ignoreWarnings", - "updateMask", - ) - ) - & set(("schemaBundle",)) - ) - - -def test__update_schema_bundle_rest_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # get arguments that satisfy an http rule for this method - sample_request = { - "schema_bundle": { - "name": "projects/sample1/instances/sample2/tables/sample3/schemaBundles/sample4" - } - } - - # get truthy value for each flattened field - mock_args = dict( - schema_bundle=table.SchemaBundle(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client._update_schema_bundle(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{schema_bundle.name=projects/*/instances/*/tables/*/schemaBundles/*}" - % client.transport._host, - args[1], - ) - - -def test__update_schema_bundle_rest_flattened_error(transport: str = "rest"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client._update_schema_bundle( - bigtable_table_admin.UpdateSchemaBundleRequest(), - schema_bundle=table.SchemaBundle(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - -def test__get_schema_bundle_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.get_schema_bundle in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.get_schema_bundle - ] = mock_rpc - - request = {} - client._get_schema_bundle(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client._get_schema_bundle(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test__get_schema_bundle_rest_required_fields( - request_type=bigtable_table_admin.GetSchemaBundleRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_schema_bundle._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = "name_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_schema_bundle._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" - - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = table.SchemaBundle() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = table.SchemaBundle.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client._get_schema_bundle(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test__get_schema_bundle_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.get_schema_bundle._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) - - -def test__get_schema_bundle_rest_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = table.SchemaBundle() - - # get arguments that satisfy an http rule for this method - sample_request = { - "name": "projects/sample1/instances/sample2/tables/sample3/schemaBundles/sample4" - } - - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = table.SchemaBundle.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client._get_schema_bundle(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/tables/*/schemaBundles/*}" - % client.transport._host, - args[1], - ) - - -def test__get_schema_bundle_rest_flattened_error(transport: str = "rest"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client._get_schema_bundle( - bigtable_table_admin.GetSchemaBundleRequest(), - name="name_value", - ) - - -def test__list_schema_bundles_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.list_schema_bundles in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.list_schema_bundles - ] = mock_rpc - - request = {} - client._list_schema_bundles(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client._list_schema_bundles(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test__list_schema_bundles_rest_required_fields( - request_type=bigtable_table_admin.ListSchemaBundlesRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_schema_bundles._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = "parent_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_schema_bundles._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set( - ( - "page_size", - "page_token", - ) - ) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListSchemaBundlesResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_table_admin.ListSchemaBundlesResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client._list_schema_bundles(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test__list_schema_bundles_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.list_schema_bundles._get_unset_required_fields({}) - assert set(unset_fields) == ( - set( - ( - "pageSize", - "pageToken", - ) - ) - & set(("parent",)) - ) - - -def test__list_schema_bundles_rest_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListSchemaBundlesResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/instances/sample2/tables/sample3"} - - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_table_admin.ListSchemaBundlesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client._list_schema_bundles(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*/tables/*}/schemaBundles" - % client.transport._host, - args[1], - ) - - -def test__list_schema_bundles_rest_flattened_error(transport: str = "rest"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client._list_schema_bundles( - bigtable_table_admin.ListSchemaBundlesRequest(), - parent="parent_value", - ) - - -def test__list_schema_bundles_rest_pager(transport: str = "rest"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(Session, "request") as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. - # with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - bigtable_table_admin.ListSchemaBundlesResponse( - schema_bundles=[ - table.SchemaBundle(), - table.SchemaBundle(), - table.SchemaBundle(), - ], - next_page_token="abc", - ), - bigtable_table_admin.ListSchemaBundlesResponse( - schema_bundles=[], - next_page_token="def", - ), - bigtable_table_admin.ListSchemaBundlesResponse( - schema_bundles=[ - table.SchemaBundle(), - ], - next_page_token="ghi", - ), - bigtable_table_admin.ListSchemaBundlesResponse( - schema_bundles=[ - table.SchemaBundle(), - table.SchemaBundle(), - ], - ), - ) - # Two responses for two calls - response = response + response - - # Wrap the values into proper Response objs - response = tuple( - bigtable_table_admin.ListSchemaBundlesResponse.to_json(x) for x in response - ) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode("UTF-8") - return_val.status_code = 200 - req.side_effect = return_values - - sample_request = {"parent": "projects/sample1/instances/sample2/tables/sample3"} - - pager = client._list_schema_bundles(request=sample_request) - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, table.SchemaBundle) for i in results) - - pages = list(client._list_schema_bundles(request=sample_request).pages) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token - - -def test__delete_schema_bundle_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.delete_schema_bundle in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.delete_schema_bundle - ] = mock_rpc - - request = {} - client._delete_schema_bundle(request) - - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 - - client._delete_schema_bundle(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test__delete_schema_bundle_rest_required_fields( - request_type=bigtable_table_admin.DeleteSchemaBundleRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).delete_schema_bundle._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = "name_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).delete_schema_bundle._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("etag",)) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" - - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = None - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "delete", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = "" - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client._delete_schema_bundle(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test__delete_schema_bundle_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.delete_schema_bundle._get_unset_required_fields({}) - assert set(unset_fields) == (set(("etag",)) & set(("name",))) - - -def test__delete_schema_bundle_rest_flattened(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = None - - # get arguments that satisfy an http rule for this method - sample_request = { - "name": "projects/sample1/instances/sample2/tables/sample3/schemaBundles/sample4" - } - - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = "" - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client._delete_schema_bundle(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/tables/*/schemaBundles/*}" - % client.transport._host, - args[1], - ) - - -def test__delete_schema_bundle_rest_flattened_error(transport: str = "rest"): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client._delete_schema_bundle( - bigtable_table_admin.DeleteSchemaBundleRequest(), - name="name_value", - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.BigtableTableAdminGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.BigtableTableAdminGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = BaseBigtableTableAdminClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide an api_key and a transport instance. - transport = transports.BigtableTableAdminGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - options = client_options.ClientOptions() - options.api_key = "api_key" - with pytest.raises(ValueError): - client = BaseBigtableTableAdminClient( - client_options=options, - transport=transport, - ) - - # It is an error to provide an api_key and a credential. - options = client_options.ClientOptions() - options.api_key = "api_key" - with pytest.raises(ValueError): - client = BaseBigtableTableAdminClient( - client_options=options, credentials=ga_credentials.AnonymousCredentials() - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.BigtableTableAdminGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = BaseBigtableTableAdminClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. 
- transport = transports.BigtableTableAdminGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = BaseBigtableTableAdminClient(transport=transport) - assert client.transport is transport - - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.BigtableTableAdminGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.BigtableTableAdminGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.BigtableTableAdminGrpcTransport, - transports.BigtableTableAdminGrpcAsyncIOTransport, - transports.BigtableTableAdminRestTransport, - ], -) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, "default") as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - - -def test_transport_kind_grpc(): - transport = BaseBigtableTableAdminClient.get_transport_class("grpc")( - credentials=ga_credentials.AnonymousCredentials() - ) - assert transport.kind == "grpc" - - -def test_initialize_client_w_grpc(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc" - ) - assert client is not None - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_create_table_empty_call_grpc(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.create_table), "__call__") as call: - call.return_value = gba_table.Table() - client.create_table(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.CreateTableRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_create_table_from_snapshot_empty_call_grpc(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.create_table_from_snapshot), "__call__" - ) as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.create_table_from_snapshot(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.CreateTableFromSnapshotRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_list_tables_empty_call_grpc(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. 
- with mock.patch.object(type(client.transport.list_tables), "__call__") as call: - call.return_value = bigtable_table_admin.ListTablesResponse() - client.list_tables(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.ListTablesRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_get_table_empty_call_grpc(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.get_table), "__call__") as call: - call.return_value = table.Table() - client.get_table(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.GetTableRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_update_table_empty_call_grpc(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.update_table), "__call__") as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.update_table(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.UpdateTableRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_delete_table_empty_call_grpc(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.delete_table), "__call__") as call: - call.return_value = None - client.delete_table(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.DeleteTableRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_undelete_table_empty_call_grpc(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.undelete_table), "__call__") as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.undelete_table(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.UndeleteTableRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. 
-def test_create_authorized_view_empty_call_grpc(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.create_authorized_view), "__call__" - ) as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.create_authorized_view(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.CreateAuthorizedViewRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_list_authorized_views_empty_call_grpc(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.list_authorized_views), "__call__" - ) as call: - call.return_value = bigtable_table_admin.ListAuthorizedViewsResponse() - client.list_authorized_views(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.ListAuthorizedViewsRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_get_authorized_view_empty_call_grpc(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.get_authorized_view), "__call__" - ) as call: - call.return_value = table.AuthorizedView() - client.get_authorized_view(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.GetAuthorizedViewRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_update_authorized_view_empty_call_grpc(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.update_authorized_view), "__call__" - ) as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.update_authorized_view(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.UpdateAuthorizedViewRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_delete_authorized_view_empty_call_grpc(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_authorized_view), "__call__" - ) as call: - call.return_value = None - client.delete_authorized_view(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.DeleteAuthorizedViewRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test__modify_column_families_empty_call_grpc(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.modify_column_families), "__call__" - ) as call: - call.return_value = table.Table() - client._modify_column_families(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.ModifyColumnFamiliesRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_drop_row_range_empty_call_grpc(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: - call.return_value = None - client.drop_row_range(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.DropRowRangeRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_generate_consistency_token_empty_call_grpc(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.generate_consistency_token), "__call__" - ) as call: - call.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() - client.generate_consistency_token(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.GenerateConsistencyTokenRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_check_consistency_empty_call_grpc(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.check_consistency), "__call__" - ) as call: - call.return_value = bigtable_table_admin.CheckConsistencyResponse() - client.check_consistency(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.CheckConsistencyRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. 
-def test__snapshot_table_empty_call_grpc():
-    client = BaseBigtableTableAdminClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport="grpc",
-    )
-
-    # Mock the actual call, and fake the request.
-    with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call:
-        call.return_value = operations_pb2.Operation(name="operations/op")
-        client._snapshot_table(request=None)
-
-        # Establish that the underlying stub method was called.
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        request_msg = bigtable_table_admin.SnapshotTableRequest()
-
-        assert args[0] == request_msg
-
-
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test__get_snapshot_empty_call_grpc():
-    client = BaseBigtableTableAdminClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport="grpc",
-    )
-
-    # Mock the actual call, and fake the request.
-    with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call:
-        call.return_value = table.Snapshot()
-        client._get_snapshot(request=None)
-
-        # Establish that the underlying stub method was called.
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        request_msg = bigtable_table_admin.GetSnapshotRequest()
-
-        assert args[0] == request_msg
-
-
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test__list_snapshots_empty_call_grpc():
-    client = BaseBigtableTableAdminClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport="grpc",
-    )
-
-    # Mock the actual call, and fake the request.
-    with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call:
-        call.return_value = bigtable_table_admin.ListSnapshotsResponse()
-        client._list_snapshots(request=None)
-
-        # Establish that the underlying stub method was called.
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        request_msg = bigtable_table_admin.ListSnapshotsRequest()
-
-        assert args[0] == request_msg
-
-
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test__delete_snapshot_empty_call_grpc():
-    client = BaseBigtableTableAdminClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport="grpc",
-    )
-
-    # Mock the actual call, and fake the request.
-    with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call:
-        call.return_value = None
-        client._delete_snapshot(request=None)
-
-        # Establish that the underlying stub method was called.
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        request_msg = bigtable_table_admin.DeleteSnapshotRequest()
-
-        assert args[0] == request_msg
-
-
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test_create_backup_empty_call_grpc():
-    client = BaseBigtableTableAdminClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport="grpc",
-    )
-
-    # Mock the actual call, and fake the request.
-    with mock.patch.object(type(client.transport.create_backup), "__call__") as call:
-        call.return_value = operations_pb2.Operation(name="operations/op")
-        client.create_backup(request=None)
-
-        # Establish that the underlying stub method was called.
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        request_msg = bigtable_table_admin.CreateBackupRequest()
-
-        assert args[0] == request_msg
-
-
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test_get_backup_empty_call_grpc():
-    client = BaseBigtableTableAdminClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport="grpc",
-    )
-
-    # Mock the actual call, and fake the request.
-    with mock.patch.object(type(client.transport.get_backup), "__call__") as call:
-        call.return_value = table.Backup()
-        client.get_backup(request=None)
-
-        # Establish that the underlying stub method was called.
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        request_msg = bigtable_table_admin.GetBackupRequest()
-
-        assert args[0] == request_msg
-
-
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test_update_backup_empty_call_grpc():
-    client = BaseBigtableTableAdminClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport="grpc",
-    )
-
-    # Mock the actual call, and fake the request.
-    with mock.patch.object(type(client.transport.update_backup), "__call__") as call:
-        call.return_value = table.Backup()
-        client.update_backup(request=None)
-
-        # Establish that the underlying stub method was called.
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        request_msg = bigtable_table_admin.UpdateBackupRequest()
-
-        assert args[0] == request_msg
-
-
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test_delete_backup_empty_call_grpc():
-    client = BaseBigtableTableAdminClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport="grpc",
-    )
-
-    # Mock the actual call, and fake the request.
-    with mock.patch.object(type(client.transport.delete_backup), "__call__") as call:
-        call.return_value = None
-        client.delete_backup(request=None)
-
-        # Establish that the underlying stub method was called.
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        request_msg = bigtable_table_admin.DeleteBackupRequest()
-
-        assert args[0] == request_msg
-
-
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test_list_backups_empty_call_grpc():
-    client = BaseBigtableTableAdminClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport="grpc",
-    )
-
-    # Mock the actual call, and fake the request.
-    with mock.patch.object(type(client.transport.list_backups), "__call__") as call:
-        call.return_value = bigtable_table_admin.ListBackupsResponse()
-        client.list_backups(request=None)
-
-        # Establish that the underlying stub method was called.
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        request_msg = bigtable_table_admin.ListBackupsRequest()
-
-        assert args[0] == request_msg
-
-
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test__restore_table_empty_call_grpc():
-    client = BaseBigtableTableAdminClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport="grpc",
-    )
-
-    # Mock the actual call, and fake the request.
-    with mock.patch.object(type(client.transport.restore_table), "__call__") as call:
-        call.return_value = operations_pb2.Operation(name="operations/op")
-        client._restore_table(request=None)
-
-        # Establish that the underlying stub method was called.
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        request_msg = bigtable_table_admin.RestoreTableRequest()
-
-        assert args[0] == request_msg
-
-
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test_copy_backup_empty_call_grpc():
-    client = BaseBigtableTableAdminClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport="grpc",
-    )
-
-    # Mock the actual call, and fake the request.
-    with mock.patch.object(type(client.transport.copy_backup), "__call__") as call:
-        call.return_value = operations_pb2.Operation(name="operations/op")
-        client.copy_backup(request=None)
-
-        # Establish that the underlying stub method was called.
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        request_msg = bigtable_table_admin.CopyBackupRequest()
-
-        assert args[0] == request_msg
-
-
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test_get_iam_policy_empty_call_grpc():
-    client = BaseBigtableTableAdminClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport="grpc",
-    )
-
-    # Mock the actual call, and fake the request.
-    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
-        call.return_value = policy_pb2.Policy()
-        client.get_iam_policy(request=None)
-
-        # Establish that the underlying stub method was called.
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        request_msg = iam_policy_pb2.GetIamPolicyRequest()
-
-        assert args[0] == request_msg
-
-
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test_set_iam_policy_empty_call_grpc():
-    client = BaseBigtableTableAdminClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport="grpc",
-    )
-
-    # Mock the actual call, and fake the request.
-    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
-        call.return_value = policy_pb2.Policy()
-        client.set_iam_policy(request=None)
-
-        # Establish that the underlying stub method was called.
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        request_msg = iam_policy_pb2.SetIamPolicyRequest()
-
-        assert args[0] == request_msg
-
-
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test_test_iam_permissions_empty_call_grpc():
-    client = BaseBigtableTableAdminClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport="grpc",
-    )
-
-    # Mock the actual call, and fake the request.
-    with mock.patch.object(
-        type(client.transport.test_iam_permissions), "__call__"
-    ) as call:
-        call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
-        client.test_iam_permissions(request=None)
-
-        # Establish that the underlying stub method was called.
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        request_msg = iam_policy_pb2.TestIamPermissionsRequest()
-
-        assert args[0] == request_msg
-
-
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
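# Editor's aside (illustrative sketch, not part of the generated suite): the
# empty-call tests in this block patch "__call__" on the *type* of the
# transport's RPC attribute because the transport exposes each RPC as a
# callable object; swapping __call__ on its class intercepts the invocation
# and records the request. ToyMulticallable and ToyTransport below are
# hypothetical stand-ins, not real Bigtable admin classes.
from unittest import mock


class ToyMulticallable:
    def __call__(self, request, timeout=None, metadata=()):
        raise RuntimeError("would hit the network")


class ToyTransport:
    def __init__(self):
        self.get_table = ToyMulticallable()


def _demo_patched_dunder_call():
    transport = ToyTransport()
    with mock.patch.object(type(transport.get_table), "__call__") as call:
        call.return_value = {"name": "tables/test"}
        response = transport.get_table("request")

        call.assert_called()
        # The mock records exactly the call-site arguments, which is why the
        # generated tests can read the request back out of mock_calls[0].
        _, args, _ = call.mock_calls[0]
        assert args[0] == "request"
        assert response == {"name": "tables/test"}


_demo_patched_dunder_call()
# The generated coverage-failsafe test described above follows.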
-def test__create_schema_bundle_empty_call_grpc(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.create_schema_bundle), "__call__" - ) as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client._create_schema_bundle(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.CreateSchemaBundleRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test__update_schema_bundle_empty_call_grpc(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.update_schema_bundle), "__call__" - ) as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client._update_schema_bundle(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.UpdateSchemaBundleRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test__get_schema_bundle_empty_call_grpc(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.get_schema_bundle), "__call__" - ) as call: - call.return_value = table.SchemaBundle() - client._get_schema_bundle(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.GetSchemaBundleRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test__list_schema_bundles_empty_call_grpc(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.list_schema_bundles), "__call__" - ) as call: - call.return_value = bigtable_table_admin.ListSchemaBundlesResponse() - client._list_schema_bundles(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.ListSchemaBundlesRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test__delete_schema_bundle_empty_call_grpc(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_schema_bundle), "__call__" - ) as call: - call.return_value = None - client._delete_schema_bundle(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.DeleteSchemaBundleRequest() - - assert args[0] == request_msg - - -def test_transport_kind_grpc_asyncio(): - transport = BaseBigtableTableAdminAsyncClient.get_transport_class("grpc_asyncio")( - credentials=async_anonymous_credentials() - ) - assert transport.kind == "grpc_asyncio" - - -def test_initialize_client_w_grpc_asyncio(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), transport="grpc_asyncio" - ) - assert client is not None - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_create_table_empty_call_grpc_asyncio(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.create_table), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - gba_table.Table( - name="name_value", - granularity=gba_table.Table.TimestampGranularity.MILLIS, - deletion_protection=True, - ) - ) - await client.create_table(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.CreateTableRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_create_table_from_snapshot_empty_call_grpc_asyncio(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.create_table_from_snapshot), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - await client.create_table_from_snapshot(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.CreateTableFromSnapshotRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_list_tables_empty_call_grpc_asyncio(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.list_tables), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.ListTablesResponse( - next_page_token="next_page_token_value", - ) - ) - await client.list_tables(request=None) - - # Establish that the underlying stub method was called. 
- call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.ListTablesRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_get_table_empty_call_grpc_asyncio(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.get_table), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - table.Table( - name="name_value", - granularity=table.Table.TimestampGranularity.MILLIS, - deletion_protection=True, - ) - ) - await client.get_table(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.GetTableRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_update_table_empty_call_grpc_asyncio(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.update_table), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - await client.update_table(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.UpdateTableRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_delete_table_empty_call_grpc_asyncio(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.delete_table), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_table(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.DeleteTableRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_undelete_table_empty_call_grpc_asyncio(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.undelete_table), "__call__") as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - await client.undelete_table(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.UndeleteTableRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_create_authorized_view_empty_call_grpc_asyncio(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.create_authorized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - await client.create_authorized_view(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.CreateAuthorizedViewRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_list_authorized_views_empty_call_grpc_asyncio(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.list_authorized_views), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.ListAuthorizedViewsResponse( - next_page_token="next_page_token_value", - ) - ) - await client.list_authorized_views(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.ListAuthorizedViewsRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_get_authorized_view_empty_call_grpc_asyncio(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.get_authorized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - table.AuthorizedView( - name="name_value", - etag="etag_value", - deletion_protection=True, - ) - ) - await client.get_authorized_view(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.GetAuthorizedViewRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. 
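# Editor's aside (illustrative sketch, not part of the generated suite): the
# asyncio variants in this block set call.return_value to an awaitable, via
# grpc_helpers_async.FakeUnaryUnaryCall, because the async client awaits the
# transport call. A minimal stand-in showing the same shape; FakeCall,
# ToyRequest, ToyAsyncTransport, and ToyAsyncClient are hypothetical names.
import asyncio
from dataclasses import dataclass
from unittest import mock


@dataclass(frozen=True)
class ToyRequest:
    name: str = ""  # proto3-style default


class FakeCall:
    """Awaitable wrapper around a canned response, akin to FakeUnaryUnaryCall."""

    def __init__(self, response=None):
        self._response = response

    def __await__(self):
        if False:  # pragma: no cover - makes this a generator-based awaitable
            yield
        return self._response


class ToyAsyncTransport:
    async def get_table(self, request):
        raise RuntimeError("would hit the network")


class ToyAsyncClient:
    def __init__(self):
        self.transport = ToyAsyncTransport()

    async def get_table(self, request=None):
        # Mirror the GAPIC convention: request=None becomes the default message.
        return await self.transport.get_table(
            ToyRequest() if request is None else request
        )


async def _demo_async_empty_call():
    client = ToyAsyncClient()
    with mock.patch.object(type(client.transport), "get_table") as call:
        call.return_value = FakeCall("canned-response")
        assert await client.get_table(request=None) == "canned-response"

        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == ToyRequest()


asyncio.run(_demo_async_empty_call())
# The generated asyncio coverage-failsafe tests continue below.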
-@pytest.mark.asyncio -async def test_update_authorized_view_empty_call_grpc_asyncio(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.update_authorized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - await client.update_authorized_view(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.UpdateAuthorizedViewRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_delete_authorized_view_empty_call_grpc_asyncio(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.delete_authorized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_authorized_view(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.DeleteAuthorizedViewRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test__modify_column_families_empty_call_grpc_asyncio(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.modify_column_families), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - table.Table( - name="name_value", - granularity=table.Table.TimestampGranularity.MILLIS, - deletion_protection=True, - ) - ) - await client._modify_column_families(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.ModifyColumnFamiliesRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_drop_row_range_empty_call_grpc_asyncio(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.drop_row_range(request=None) - - # Establish that the underlying stub method was called. 
- call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.DropRowRangeRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_generate_consistency_token_empty_call_grpc_asyncio(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.generate_consistency_token), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.GenerateConsistencyTokenResponse( - consistency_token="consistency_token_value", - ) - ) - await client.generate_consistency_token(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.GenerateConsistencyTokenRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_check_consistency_empty_call_grpc_asyncio(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.check_consistency), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.CheckConsistencyResponse( - consistent=True, - ) - ) - await client.check_consistency(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.CheckConsistencyRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test__snapshot_table_empty_call_grpc_asyncio(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - await client._snapshot_table(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.SnapshotTableRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test__get_snapshot_empty_call_grpc_asyncio(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - table.Snapshot( - name="name_value", - data_size_bytes=1594, - state=table.Snapshot.State.READY, - description="description_value", - ) - ) - await client._get_snapshot(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.GetSnapshotRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test__list_snapshots_empty_call_grpc_asyncio(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.ListSnapshotsResponse( - next_page_token="next_page_token_value", - ) - ) - await client._list_snapshots(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.ListSnapshotsRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test__delete_snapshot_empty_call_grpc_asyncio(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client._delete_snapshot(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.DeleteSnapshotRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_create_backup_empty_call_grpc_asyncio(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.create_backup), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - await client.create_backup(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.CreateBackupRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. 
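# Editor's aside (illustrative, not part of the generated suite): each entry
# of Mock.mock_calls unpacks into a (name, args, kwargs) triple, which is what
# the recurring `_, args, _ = call.mock_calls[0]` lines in these tests rely on
# to recover the request message that reached the transport.
from unittest import mock

m = mock.MagicMock()
m("the-request", timeout=5.0, metadata=())

name, args, kwargs = m.mock_calls[0]
assert name == ""                                  # a direct call on the mock itself
assert args == ("the-request",)                    # args[0] is the request
assert kwargs == {"timeout": 5.0, "metadata": ()}
# The generated asyncio coverage-failsafe tests continue below.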
-@pytest.mark.asyncio -async def test_get_backup_empty_call_grpc_asyncio(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.get_backup), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - table.Backup( - name="name_value", - source_table="source_table_value", - source_backup="source_backup_value", - size_bytes=1089, - state=table.Backup.State.CREATING, - backup_type=table.Backup.BackupType.STANDARD, - ) - ) - await client.get_backup(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.GetBackupRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_update_backup_empty_call_grpc_asyncio(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.update_backup), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - table.Backup( - name="name_value", - source_table="source_table_value", - source_backup="source_backup_value", - size_bytes=1089, - state=table.Backup.State.CREATING, - backup_type=table.Backup.BackupType.STANDARD, - ) - ) - await client.update_backup(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.UpdateBackupRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_delete_backup_empty_call_grpc_asyncio(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_backup(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.DeleteBackupRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_list_backups_empty_call_grpc_asyncio(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.list_backups), "__call__") as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.ListBackupsResponse( - next_page_token="next_page_token_value", - ) - ) - await client.list_backups(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.ListBackupsRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test__restore_table_empty_call_grpc_asyncio(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.restore_table), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - await client._restore_table(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.RestoreTableRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_copy_backup_empty_call_grpc_asyncio(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - await client.copy_backup(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.CopyBackupRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_get_iam_policy_empty_call_grpc_asyncio(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - policy_pb2.Policy( - version=774, - etag=b"etag_blob", - ) - ) - await client.get_iam_policy(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = iam_policy_pb2.GetIamPolicyRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_set_iam_policy_empty_call_grpc_asyncio(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. 
- with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - policy_pb2.Policy( - version=774, - etag=b"etag_blob", - ) - ) - await client.set_iam_policy(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = iam_policy_pb2.SetIamPolicyRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_test_iam_permissions_empty_call_grpc_asyncio(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - iam_policy_pb2.TestIamPermissionsResponse( - permissions=["permissions_value"], - ) - ) - await client.test_iam_permissions(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = iam_policy_pb2.TestIamPermissionsRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test__create_schema_bundle_empty_call_grpc_asyncio(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.create_schema_bundle), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - await client._create_schema_bundle(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.CreateSchemaBundleRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test__update_schema_bundle_empty_call_grpc_asyncio(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.update_schema_bundle), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - await client._update_schema_bundle(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.UpdateSchemaBundleRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. 
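# Editor's aside (illustrative, not part of the generated suite): the final
# assertion in each of these tests relies on protobuf value equality, so a
# default-constructed request compares equal to the "empty" request the stub
# received. A short sketch using the IAM request type referenced above,
# assuming the google.iam.v1 protos imported elsewhere in this module are
# available.
from google.iam.v1 import iam_policy_pb2

a = iam_policy_pb2.GetIamPolicyRequest()
b = iam_policy_pb2.GetIamPolicyRequest()
assert a == b              # value equality, not identity
assert a is not b
assert a.resource == ""    # proto3 string fields default to ""
# The generated asyncio coverage-failsafe tests continue below.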
-@pytest.mark.asyncio -async def test__get_schema_bundle_empty_call_grpc_asyncio(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.get_schema_bundle), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - table.SchemaBundle( - name="name_value", - etag="etag_value", - ) - ) - await client._get_schema_bundle(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.GetSchemaBundleRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test__list_schema_bundles_empty_call_grpc_asyncio(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.list_schema_bundles), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.ListSchemaBundlesResponse( - next_page_token="next_page_token_value", - ) - ) - await client._list_schema_bundles(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.ListSchemaBundlesRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test__delete_schema_bundle_empty_call_grpc_asyncio(): - client = BaseBigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.delete_schema_bundle), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client._delete_schema_bundle(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.DeleteSchemaBundleRequest() - - assert args[0] == request_msg - - -def test_transport_kind_rest(): - transport = BaseBigtableTableAdminClient.get_transport_class("rest")( - credentials=ga_credentials.AnonymousCredentials() - ) - assert transport.kind == "rest" - - -def test_create_table_rest_bad_request( - request_type=bigtable_table_admin.CreateTableRequest, -): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.create_table(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.CreateTableRequest, - dict, - ], -) -def test_create_table_rest_call_success(request_type): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = gba_table.Table( - name="name_value", - granularity=gba_table.Table.TimestampGranularity.MILLIS, - deletion_protection=True, - ) - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = gba_table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.create_table(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gba_table.Table) - assert response.name == "name_value" - assert response.granularity == gba_table.Table.TimestampGranularity.MILLIS - assert response.deletion_protection is True - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_create_table_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BaseBigtableTableAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_create_table" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_create_table_with_metadata" - ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_create_table" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.CreateTableRequest.pb( - bigtable_table_admin.CreateTableRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = gba_table.Table.to_json(gba_table.Table()) - req.return_value.content = return_value - - request = bigtable_table_admin.CreateTableRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = gba_table.Table() - post_with_metadata.return_value = gba_table.Table(), metadata - - client.create_table( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test_create_table_from_snapshot_rest_bad_request( - request_type=bigtable_table_admin.CreateTableFromSnapshotRequest, -): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.create_table_from_snapshot(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.CreateTableFromSnapshotRequest, - dict, - ], -) -def test_create_table_from_snapshot_rest_call_success(request_type): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.create_table_from_snapshot(request) - - # Establish that the response is the type that we expect. - json_return_value = json_format.MessageToJson(return_value) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_create_table_from_snapshot_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BaseBigtableTableAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_create_table_from_snapshot" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, - "post_create_table_from_snapshot_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_create_table_from_snapshot" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.CreateTableFromSnapshotRequest.pb( - bigtable_table_admin.CreateTableFromSnapshotRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = json_format.MessageToJson(operations_pb2.Operation()) - req.return_value.content = return_value - - request = bigtable_table_admin.CreateTableFromSnapshotRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - 
post.return_value = operations_pb2.Operation() - post_with_metadata.return_value = operations_pb2.Operation(), metadata - - client.create_table_from_snapshot( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test_list_tables_rest_bad_request( - request_type=bigtable_table_admin.ListTablesRequest, -): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.list_tables(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.ListTablesRequest, - dict, - ], -) -def test_list_tables_rest_call_success(request_type): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListTablesResponse( - next_page_token="next_page_token_value", - ) - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_table_admin.ListTablesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.list_tables(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListTablesPager) - assert response.next_page_token == "next_page_token_value" - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_tables_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BaseBigtableTableAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_list_tables" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_list_tables_with_metadata" - ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_list_tables" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.ListTablesRequest.pb( - bigtable_table_admin.ListTablesRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = bigtable_table_admin.ListTablesResponse.to_json( - bigtable_table_admin.ListTablesResponse() - ) - req.return_value.content = return_value - - request = bigtable_table_admin.ListTablesRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable_table_admin.ListTablesResponse() - post_with_metadata.return_value = ( - bigtable_table_admin.ListTablesResponse(), - metadata, - ) - - client.list_tables( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test_get_table_rest_bad_request(request_type=bigtable_table_admin.GetTableRequest): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.get_table(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.GetTableRequest, - dict, - ], -) -def test_get_table_rest_call_success(request_type): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = table.Table( - name="name_value", - granularity=table.Table.TimestampGranularity.MILLIS, - deletion_protection=True, - ) - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.get_table(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, table.Table) - assert response.name == "name_value" - assert response.granularity == table.Table.TimestampGranularity.MILLIS - assert response.deletion_protection is True - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_table_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BaseBigtableTableAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_get_table" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_get_table_with_metadata" - ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_get_table" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.GetTableRequest.pb( - bigtable_table_admin.GetTableRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = table.Table.to_json(table.Table()) - req.return_value.content = return_value - - request = bigtable_table_admin.GetTableRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = table.Table() - post_with_metadata.return_value = table.Table(), metadata - - client.get_table( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test_update_table_rest_bad_request( - request_type=bigtable_table_admin.UpdateTableRequest, -): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = { - "table": {"name": "projects/sample1/instances/sample2/tables/sample3"} - } - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.update_table(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.UpdateTableRequest, - dict, - ], -) -def test_update_table_rest_call_success(request_type): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = { - "table": {"name": "projects/sample1/instances/sample2/tables/sample3"} - } - request_init["table"] = { - "name": "projects/sample1/instances/sample2/tables/sample3", - "cluster_states": {}, - "column_families": {}, - "granularity": 1, - "restore_info": { - "source_type": 1, - "backup_info": { - "backup": "backup_value", - "start_time": {"seconds": 751, "nanos": 543}, - "end_time": {}, - "source_table": "source_table_value", - "source_backup": "source_backup_value", - }, - }, - "change_stream_config": {"retention_period": {"seconds": 751, "nanos": 543}}, - "deletion_protection": True, - "automated_backup_policy": {"retention_period": {}, "frequency": {}}, - "row_key_schema": { - "fields": [ - { - "field_name": "field_name_value", - "type_": { - "bytes_type": {"encoding": {"raw": {}}}, - "string_type": {"encoding": {"utf8_raw": {}, "utf8_bytes": {}}}, - "int64_type": { - "encoding": { - "big_endian_bytes": {"bytes_type": {}}, - "ordered_code_bytes": {}, - } - }, - "float32_type": {}, - "float64_type": {}, - "bool_type": {}, - "timestamp_type": {"encoding": {"unix_micros_int64": {}}}, - "date_type": {}, - "aggregate_type": { - "input_type": {}, - "state_type": {}, - "sum": {}, - "hllpp_unique_count": {}, - "max_": {}, - "min_": {}, - }, - "struct_type": {}, - "array_type": {"element_type": {}}, - "map_type": {"key_type": {}, "value_type": {}}, - "proto_type": { - "schema_bundle_id": "schema_bundle_id_value", - "message_name": "message_name_value", - }, - "enum_type": { - "schema_bundle_id": "schema_bundle_id_value", - "enum_name": "enum_name_value", - }, - }, - } - ], - "encoding": { - "singleton": {}, - "delimited_bytes": {"delimiter": b"delimiter_blob"}, - "ordered_code_bytes": {}, - }, - }, - } - # The version of a generated dependency at test runtime may differ from the version used during generation. - # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 - - # Determine if the message type is proto-plus or protobuf - test_field = bigtable_table_admin.UpdateTableRequest.meta.fields["table"] - - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. 
- message_fields = [] - - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") - - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields - - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - - subfields_not_in_runtime = [] - - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["table"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - { - "field": field, - "subfield": subfield, - "is_repeated": is_repeated, - } - ) - - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range(0, len(request_init["table"][field])): - del request_init["table"][field][i][subfield] - else: - del request_init["table"][field][subfield] - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.update_table(request) - - # Establish that the response is the type that we expect. 
- json_return_value = json_format.MessageToJson(return_value) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_update_table_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BaseBigtableTableAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_update_table" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_update_table_with_metadata" - ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_update_table" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.UpdateTableRequest.pb( - bigtable_table_admin.UpdateTableRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = json_format.MessageToJson(operations_pb2.Operation()) - req.return_value.content = return_value - - request = bigtable_table_admin.UpdateTableRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - post_with_metadata.return_value = operations_pb2.Operation(), metadata - - client.update_table( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test_delete_table_rest_bad_request( - request_type=bigtable_table_admin.DeleteTableRequest, -): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.delete_table(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.DeleteTableRequest, - dict, - ], -) -def test_delete_table_rest_call_success(request_type): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = None - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - json_return_value = "" - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.delete_table(request) - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_delete_table_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BaseBigtableTableAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_delete_table" - ) as pre: - pre.assert_not_called() - pb_message = bigtable_table_admin.DeleteTableRequest.pb( - bigtable_table_admin.DeleteTableRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - request = bigtable_table_admin.DeleteTableRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - - client.delete_table( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - - -def test_undelete_table_rest_bad_request( - request_type=bigtable_table_admin.UndeleteTableRequest, -): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.undelete_table(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.UndeleteTableRequest, - dict, - ], -) -def test_undelete_table_rest_call_success(request_type): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.undelete_table(request) - - # Establish that the response is the type that we expect. - json_return_value = json_format.MessageToJson(return_value) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_undelete_table_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BaseBigtableTableAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_undelete_table" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, - "post_undelete_table_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_undelete_table" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.UndeleteTableRequest.pb( - bigtable_table_admin.UndeleteTableRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = json_format.MessageToJson(operations_pb2.Operation()) - req.return_value.content = return_value - - request = bigtable_table_admin.UndeleteTableRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - post_with_metadata.return_value = operations_pb2.Operation(), metadata - - client.undelete_table( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test_create_authorized_view_rest_bad_request( - request_type=bigtable_table_admin.CreateAuthorizedViewRequest, -): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.create_authorized_view(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.CreateAuthorizedViewRequest, - dict, - ], -) -def test_create_authorized_view_rest_call_success(request_type): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"} - request_init["authorized_view"] = { - "name": "name_value", - "subset_view": { - "row_prefixes": [b"row_prefixes_blob1", b"row_prefixes_blob2"], - "family_subsets": {}, - }, - "etag": "etag_value", - "deletion_protection": True, - } - # The version of a generated dependency at test runtime may differ from the version used during generation. - # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 - - # Determine if the message type is proto-plus or protobuf - test_field = bigtable_table_admin.CreateAuthorizedViewRequest.meta.fields[ - "authorized_view" - ] - - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. 
- message_fields = [] - - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") - - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields - - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - - subfields_not_in_runtime = [] - - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["authorized_view"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - { - "field": field, - "subfield": subfield, - "is_repeated": is_repeated, - } - ) - - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range(0, len(request_init["authorized_view"][field])): - del request_init["authorized_view"][field][i][subfield] - else: - del request_init["authorized_view"][field][subfield] - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.create_authorized_view(request) - - # Establish that the response is the type that we expect. 
- json_return_value = json_format.MessageToJson(return_value) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_create_authorized_view_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BaseBigtableTableAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_create_authorized_view" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, - "post_create_authorized_view_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_create_authorized_view" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.CreateAuthorizedViewRequest.pb( - bigtable_table_admin.CreateAuthorizedViewRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = json_format.MessageToJson(operations_pb2.Operation()) - req.return_value.content = return_value - - request = bigtable_table_admin.CreateAuthorizedViewRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - post_with_metadata.return_value = operations_pb2.Operation(), metadata - - client.create_authorized_view( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test_list_authorized_views_rest_bad_request( - request_type=bigtable_table_admin.ListAuthorizedViewsRequest, -): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.list_authorized_views(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.ListAuthorizedViewsRequest, - dict, - ], -) -def test_list_authorized_views_rest_call_success(request_type): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListAuthorizedViewsResponse( - next_page_token="next_page_token_value", - ) - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_table_admin.ListAuthorizedViewsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.list_authorized_views(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListAuthorizedViewsPager) - assert response.next_page_token == "next_page_token_value" - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_authorized_views_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BaseBigtableTableAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_list_authorized_views" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, - "post_list_authorized_views_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_list_authorized_views" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.ListAuthorizedViewsRequest.pb( - bigtable_table_admin.ListAuthorizedViewsRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = bigtable_table_admin.ListAuthorizedViewsResponse.to_json( - bigtable_table_admin.ListAuthorizedViewsResponse() - ) - req.return_value.content = return_value - - request = bigtable_table_admin.ListAuthorizedViewsRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable_table_admin.ListAuthorizedViewsResponse() - post_with_metadata.return_value = ( - bigtable_table_admin.ListAuthorizedViewsResponse(), - metadata, - ) - - client.list_authorized_views( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test_get_authorized_view_rest_bad_request( - request_type=bigtable_table_admin.GetAuthorizedViewRequest, -): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" - } - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.get_authorized_view(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.GetAuthorizedViewRequest, - dict, - ], -) -def test_get_authorized_view_rest_call_success(request_type): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" - } - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = table.AuthorizedView( - name="name_value", - etag="etag_value", - deletion_protection=True, - ) - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = table.AuthorizedView.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.get_authorized_view(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, table.AuthorizedView) - assert response.name == "name_value" - assert response.etag == "etag_value" - assert response.deletion_protection is True - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_authorized_view_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BaseBigtableTableAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_get_authorized_view" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, - "post_get_authorized_view_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_get_authorized_view" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.GetAuthorizedViewRequest.pb( - bigtable_table_admin.GetAuthorizedViewRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = table.AuthorizedView.to_json(table.AuthorizedView()) - req.return_value.content = return_value - - request = bigtable_table_admin.GetAuthorizedViewRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = table.AuthorizedView() - post_with_metadata.return_value = table.AuthorizedView(), metadata - - client.get_authorized_view( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test_update_authorized_view_rest_bad_request( - request_type=bigtable_table_admin.UpdateAuthorizedViewRequest, -): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = { - "authorized_view": { - "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" - } - } - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.update_authorized_view(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.UpdateAuthorizedViewRequest, - dict, - ], -) -def test_update_authorized_view_rest_call_success(request_type): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = { - "authorized_view": { - "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" - } - } - request_init["authorized_view"] = { - "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4", - "subset_view": { - "row_prefixes": [b"row_prefixes_blob1", b"row_prefixes_blob2"], - "family_subsets": {}, - }, - "etag": "etag_value", - "deletion_protection": True, - } - # The version of a generated dependency at test runtime may differ from the version used during generation. - # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 - - # Determine if the message type is proto-plus or protobuf - test_field = bigtable_table_admin.UpdateAuthorizedViewRequest.meta.fields[ - "authorized_view" - ] - - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. 
- message_fields = [] - - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") - - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields - - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - - subfields_not_in_runtime = [] - - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["authorized_view"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - { - "field": field, - "subfield": subfield, - "is_repeated": is_repeated, - } - ) - - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range(0, len(request_init["authorized_view"][field])): - del request_init["authorized_view"][field][i][subfield] - else: - del request_init["authorized_view"][field][subfield] - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.update_authorized_view(request) - - # Establish that the response is the type that we expect. 
- json_return_value = json_format.MessageToJson(return_value) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_update_authorized_view_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BaseBigtableTableAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_update_authorized_view" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, - "post_update_authorized_view_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_update_authorized_view" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.UpdateAuthorizedViewRequest.pb( - bigtable_table_admin.UpdateAuthorizedViewRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = json_format.MessageToJson(operations_pb2.Operation()) - req.return_value.content = return_value - - request = bigtable_table_admin.UpdateAuthorizedViewRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - post_with_metadata.return_value = operations_pb2.Operation(), metadata - - client.update_authorized_view( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test_delete_authorized_view_rest_bad_request( - request_type=bigtable_table_admin.DeleteAuthorizedViewRequest, -): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" - } - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.delete_authorized_view(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.DeleteAuthorizedViewRequest, - dict, - ], -) -def test_delete_authorized_view_rest_call_success(request_type): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" - } - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = None - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - json_return_value = "" - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.delete_authorized_view(request) - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_delete_authorized_view_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BaseBigtableTableAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_delete_authorized_view" - ) as pre: - pre.assert_not_called() - pb_message = bigtable_table_admin.DeleteAuthorizedViewRequest.pb( - bigtable_table_admin.DeleteAuthorizedViewRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - request = bigtable_table_admin.DeleteAuthorizedViewRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - - client.delete_authorized_view( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - - -def test__modify_column_families_rest_bad_request( - request_type=bigtable_table_admin.ModifyColumnFamiliesRequest, -): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client._modify_column_families(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.ModifyColumnFamiliesRequest, - dict, - ], -) -def test__modify_column_families_rest_call_success(request_type): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = table.Table( - name="name_value", - granularity=table.Table.TimestampGranularity.MILLIS, - deletion_protection=True, - ) - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client._modify_column_families(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, table.Table) - assert response.name == "name_value" - assert response.granularity == table.Table.TimestampGranularity.MILLIS - assert response.deletion_protection is True - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test__modify_column_families_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BaseBigtableTableAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_modify_column_families" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, - "post_modify_column_families_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_modify_column_families" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.ModifyColumnFamiliesRequest.pb( - bigtable_table_admin.ModifyColumnFamiliesRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = table.Table.to_json(table.Table()) - req.return_value.content = return_value - - request = bigtable_table_admin.ModifyColumnFamiliesRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = table.Table() - post_with_metadata.return_value = table.Table(), metadata - - client._modify_column_families( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test_drop_row_range_rest_bad_request( - request_type=bigtable_table_admin.DropRowRangeRequest, -): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.drop_row_range(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.DropRowRangeRequest, - dict, - ], -) -def test_drop_row_range_rest_call_success(request_type): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = None - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - json_return_value = "" - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.drop_row_range(request) - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_drop_row_range_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BaseBigtableTableAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_drop_row_range" - ) as pre: - pre.assert_not_called() - pb_message = bigtable_table_admin.DropRowRangeRequest.pb( - bigtable_table_admin.DropRowRangeRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - request = bigtable_table_admin.DropRowRangeRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - - client.drop_row_range( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - - -def test_generate_consistency_token_rest_bad_request( - request_type=bigtable_table_admin.GenerateConsistencyTokenRequest, -): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.generate_consistency_token(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.GenerateConsistencyTokenRequest, - dict, - ], -) -def test_generate_consistency_token_rest_call_success(request_type): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.GenerateConsistencyTokenResponse( - consistency_token="consistency_token_value", - ) - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.generate_consistency_token(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, bigtable_table_admin.GenerateConsistencyTokenResponse) - assert response.consistency_token == "consistency_token_value" - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_generate_consistency_token_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BaseBigtableTableAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_generate_consistency_token" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, - "post_generate_consistency_token_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_generate_consistency_token" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.GenerateConsistencyTokenRequest.pb( - bigtable_table_admin.GenerateConsistencyTokenRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.to_json( - bigtable_table_admin.GenerateConsistencyTokenResponse() - ) - req.return_value.content = return_value - - request = bigtable_table_admin.GenerateConsistencyTokenRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() - post_with_metadata.return_value = ( - bigtable_table_admin.GenerateConsistencyTokenResponse(), - metadata, - ) - - client.generate_consistency_token( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test_check_consistency_rest_bad_request( - request_type=bigtable_table_admin.CheckConsistencyRequest, -): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.check_consistency(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.CheckConsistencyRequest, - dict, - ], -) -def test_check_consistency_rest_call_success(request_type): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.CheckConsistencyResponse( - consistent=True, - ) - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_table_admin.CheckConsistencyResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.check_consistency(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, bigtable_table_admin.CheckConsistencyResponse) - assert response.consistent is True - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_check_consistency_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BaseBigtableTableAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_check_consistency" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, - "post_check_consistency_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_check_consistency" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.CheckConsistencyRequest.pb( - bigtable_table_admin.CheckConsistencyRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = bigtable_table_admin.CheckConsistencyResponse.to_json( - bigtable_table_admin.CheckConsistencyResponse() - ) - req.return_value.content = return_value - - request = bigtable_table_admin.CheckConsistencyRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable_table_admin.CheckConsistencyResponse() - post_with_metadata.return_value = ( - bigtable_table_admin.CheckConsistencyResponse(), - metadata, - ) - - client.check_consistency( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test__snapshot_table_rest_bad_request( - request_type=bigtable_table_admin.SnapshotTableRequest, -): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client._snapshot_table(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.SnapshotTableRequest, - dict, - ], -) -def test__snapshot_table_rest_call_success(request_type): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client._snapshot_table(request) - - # Establish that the response is the type that we expect. - json_return_value = json_format.MessageToJson(return_value) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test__snapshot_table_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BaseBigtableTableAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_snapshot_table" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, - "post_snapshot_table_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_snapshot_table" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.SnapshotTableRequest.pb( - bigtable_table_admin.SnapshotTableRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = json_format.MessageToJson(operations_pb2.Operation()) - req.return_value.content = return_value - - request = bigtable_table_admin.SnapshotTableRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - post_with_metadata.return_value = operations_pb2.Operation(), 
metadata - - client._snapshot_table( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test__get_snapshot_rest_bad_request( - request_type=bigtable_table_admin.GetSnapshotRequest, -): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" - } - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client._get_snapshot(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.GetSnapshotRequest, - dict, - ], -) -def test__get_snapshot_rest_call_success(request_type): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" - } - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = table.Snapshot( - name="name_value", - data_size_bytes=1594, - state=table.Snapshot.State.READY, - description="description_value", - ) - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = table.Snapshot.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client._get_snapshot(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, table.Snapshot) - assert response.name == "name_value" - assert response.data_size_bytes == 1594 - assert response.state == table.Snapshot.State.READY - assert response.description == "description_value" - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test__get_snapshot_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BaseBigtableTableAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_get_snapshot" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_get_snapshot_with_metadata" - ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_get_snapshot" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.GetSnapshotRequest.pb( - bigtable_table_admin.GetSnapshotRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = table.Snapshot.to_json(table.Snapshot()) - req.return_value.content = return_value - - request = bigtable_table_admin.GetSnapshotRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = table.Snapshot() - post_with_metadata.return_value = table.Snapshot(), metadata - - client._get_snapshot( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test__list_snapshots_rest_bad_request( - request_type=bigtable_table_admin.ListSnapshotsRequest, -): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client._list_snapshots(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.ListSnapshotsRequest, - dict, - ], -) -def test__list_snapshots_rest_call_success(request_type): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListSnapshotsResponse( - next_page_token="next_page_token_value", - ) - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client._list_snapshots(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListSnapshotsPager) - assert response.next_page_token == "next_page_token_value" - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test__list_snapshots_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BaseBigtableTableAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_list_snapshots" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, - "post_list_snapshots_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_list_snapshots" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.ListSnapshotsRequest.pb( - bigtable_table_admin.ListSnapshotsRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = bigtable_table_admin.ListSnapshotsResponse.to_json( - bigtable_table_admin.ListSnapshotsResponse() - ) - req.return_value.content = return_value - - request = bigtable_table_admin.ListSnapshotsRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable_table_admin.ListSnapshotsResponse() - post_with_metadata.return_value = ( - bigtable_table_admin.ListSnapshotsResponse(), - metadata, - ) - - client._list_snapshots( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test__delete_snapshot_rest_bad_request( - request_type=bigtable_table_admin.DeleteSnapshotRequest, -): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" - } - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client._delete_snapshot(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.DeleteSnapshotRequest, - dict, - ], -) -def test__delete_snapshot_rest_call_success(request_type): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" - } - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = None - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - json_return_value = "" - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client._delete_snapshot(request) - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test__delete_snapshot_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BaseBigtableTableAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_delete_snapshot" - ) as pre: - pre.assert_not_called() - pb_message = bigtable_table_admin.DeleteSnapshotRequest.pb( - bigtable_table_admin.DeleteSnapshotRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - request = bigtable_table_admin.DeleteSnapshotRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - - client._delete_snapshot( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - - -def test_create_backup_rest_bad_request( - request_type=bigtable_table_admin.CreateBackupRequest, -): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.create_backup(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.CreateBackupRequest, - dict, - ], -) -def test_create_backup_rest_call_success(request_type): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} - request_init["backup"] = { - "name": "name_value", - "source_table": "source_table_value", - "source_backup": "source_backup_value", - "expire_time": {"seconds": 751, "nanos": 543}, - "start_time": {}, - "end_time": {}, - "size_bytes": 1089, - "state": 1, - "encryption_info": { - "encryption_type": 1, - "encryption_status": { - "code": 411, - "message": "message_value", - "details": [ - { - "type_url": "type.googleapis.com/google.protobuf.Duration", - "value": b"\x08\x0c\x10\xdb\x07", - } - ], - }, - "kms_key_version": "kms_key_version_value", - }, - "backup_type": 1, - "hot_to_standard_time": {}, - } - # The version of a generated dependency at test runtime may differ from the version used during generation. - # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 - - # Determine if the message type is proto-plus or protobuf - test_field = bigtable_table_admin.CreateBackupRequest.meta.fields["backup"] - - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. 
- message_fields = [] - - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") - - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields - - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - - subfields_not_in_runtime = [] - - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["backup"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - { - "field": field, - "subfield": subfield, - "is_repeated": is_repeated, - } - ) - - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range(0, len(request_init["backup"][field])): - del request_init["backup"][field][i][subfield] - else: - del request_init["backup"][field][subfield] - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.create_backup(request) - - # Establish that the response is the type that we expect. 
- json_return_value = json_format.MessageToJson(return_value) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_create_backup_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BaseBigtableTableAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_create_backup" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_create_backup_with_metadata" - ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_create_backup" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.CreateBackupRequest.pb( - bigtable_table_admin.CreateBackupRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = json_format.MessageToJson(operations_pb2.Operation()) - req.return_value.content = return_value - - request = bigtable_table_admin.CreateBackupRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - post_with_metadata.return_value = operations_pb2.Operation(), metadata - - client.create_backup( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test_get_backup_rest_bad_request( - request_type=bigtable_table_admin.GetBackupRequest, -): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" - } - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.get_backup(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.GetBackupRequest, - dict, - ], -) -def test_get_backup_rest_call_success(request_type): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" - } - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = table.Backup( - name="name_value", - source_table="source_table_value", - source_backup="source_backup_value", - size_bytes=1089, - state=table.Backup.State.CREATING, - backup_type=table.Backup.BackupType.STANDARD, - ) - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = table.Backup.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.get_backup(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, table.Backup) - assert response.name == "name_value" - assert response.source_table == "source_table_value" - assert response.source_backup == "source_backup_value" - assert response.size_bytes == 1089 - assert response.state == table.Backup.State.CREATING - assert response.backup_type == table.Backup.BackupType.STANDARD - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_backup_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BaseBigtableTableAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_get_backup" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_get_backup_with_metadata" - ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_get_backup" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.GetBackupRequest.pb( - bigtable_table_admin.GetBackupRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = table.Backup.to_json(table.Backup()) - req.return_value.content = return_value - - request = bigtable_table_admin.GetBackupRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = table.Backup() - post_with_metadata.return_value = table.Backup(), metadata - - client.get_backup( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test_update_backup_rest_bad_request( - request_type=bigtable_table_admin.UpdateBackupRequest, -): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = { - "backup": { - "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" - } - } - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.update_backup(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.UpdateBackupRequest, - dict, - ], -) -def test_update_backup_rest_call_success(request_type): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = { - "backup": { - "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" - } - } - request_init["backup"] = { - "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4", - "source_table": "source_table_value", - "source_backup": "source_backup_value", - "expire_time": {"seconds": 751, "nanos": 543}, - "start_time": {}, - "end_time": {}, - "size_bytes": 1089, - "state": 1, - "encryption_info": { - "encryption_type": 1, - "encryption_status": { - "code": 411, - "message": "message_value", - "details": [ - { - "type_url": "type.googleapis.com/google.protobuf.Duration", - "value": b"\x08\x0c\x10\xdb\x07", - } - ], - }, - "kms_key_version": "kms_key_version_value", - }, - "backup_type": 1, - "hot_to_standard_time": {}, - } - # The version of a generated dependency at test runtime may differ from the version used during generation. - # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 - - # Determine if the message type is proto-plus or protobuf - test_field = bigtable_table_admin.UpdateBackupRequest.meta.fields["backup"] - - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. 
- message_fields = [] - - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") - - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields - - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - - subfields_not_in_runtime = [] - - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["backup"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - { - "field": field, - "subfield": subfield, - "is_repeated": is_repeated, - } - ) - - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range(0, len(request_init["backup"][field])): - del request_init["backup"][field][i][subfield] - else: - del request_init["backup"][field][subfield] - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = table.Backup( - name="name_value", - source_table="source_table_value", - source_backup="source_backup_value", - size_bytes=1089, - state=table.Backup.State.CREATING, - backup_type=table.Backup.BackupType.STANDARD, - ) - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = table.Backup.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.update_backup(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, table.Backup) - assert response.name == "name_value" - assert response.source_table == "source_table_value" - assert response.source_backup == "source_backup_value" - assert response.size_bytes == 1089 - assert response.state == table.Backup.State.CREATING - assert response.backup_type == table.Backup.BackupType.STANDARD - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_update_backup_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BaseBigtableTableAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_update_backup" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_update_backup_with_metadata" - ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_update_backup" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.UpdateBackupRequest.pb( - bigtable_table_admin.UpdateBackupRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = table.Backup.to_json(table.Backup()) - req.return_value.content = return_value - - request = bigtable_table_admin.UpdateBackupRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = table.Backup() - post_with_metadata.return_value = table.Backup(), metadata - - client.update_backup( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test_delete_backup_rest_bad_request( - request_type=bigtable_table_admin.DeleteBackupRequest, -): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" - } - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.delete_backup(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.DeleteBackupRequest, - dict, - ], -) -def test_delete_backup_rest_call_success(request_type): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" - } - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = None - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - json_return_value = "" - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.delete_backup(request) - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_delete_backup_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BaseBigtableTableAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_delete_backup" - ) as pre: - pre.assert_not_called() - pb_message = bigtable_table_admin.DeleteBackupRequest.pb( - bigtable_table_admin.DeleteBackupRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - request = bigtable_table_admin.DeleteBackupRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - - client.delete_backup( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - - -def test_list_backups_rest_bad_request( - request_type=bigtable_table_admin.ListBackupsRequest, -): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.list_backups(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.ListBackupsRequest, - dict, - ], -) -def test_list_backups_rest_call_success(request_type): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListBackupsResponse( - next_page_token="next_page_token_value", - ) - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.list_backups(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListBackupsPager) - assert response.next_page_token == "next_page_token_value" - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_backups_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BaseBigtableTableAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_list_backups" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_list_backups_with_metadata" - ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_list_backups" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.ListBackupsRequest.pb( - bigtable_table_admin.ListBackupsRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = bigtable_table_admin.ListBackupsResponse.to_json( - bigtable_table_admin.ListBackupsResponse() - ) - req.return_value.content = return_value - - request = bigtable_table_admin.ListBackupsRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable_table_admin.ListBackupsResponse() - post_with_metadata.return_value = ( - bigtable_table_admin.ListBackupsResponse(), - metadata, - ) - - client.list_backups( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test__restore_table_rest_bad_request( - request_type=bigtable_table_admin.RestoreTableRequest, -): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client._restore_table(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.RestoreTableRequest, - dict, - ], -) -def test__restore_table_rest_call_success(request_type): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client._restore_table(request) - - # Establish that the response is the type that we expect. - json_return_value = json_format.MessageToJson(return_value) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test__restore_table_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BaseBigtableTableAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_restore_table" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_restore_table_with_metadata" - ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_restore_table" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.RestoreTableRequest.pb( - bigtable_table_admin.RestoreTableRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = json_format.MessageToJson(operations_pb2.Operation()) - req.return_value.content = return_value - - request = bigtable_table_admin.RestoreTableRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - post_with_metadata.return_value = operations_pb2.Operation(), metadata - - 
client._restore_table( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test_copy_backup_rest_bad_request( - request_type=bigtable_table_admin.CopyBackupRequest, -): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.copy_backup(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.CopyBackupRequest, - dict, - ], -) -def test_copy_backup_rest_call_success(request_type): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.copy_backup(request) - - # Establish that the response is the type that we expect. 
- json_return_value = json_format.MessageToJson(return_value) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_copy_backup_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BaseBigtableTableAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_copy_backup" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_copy_backup_with_metadata" - ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_copy_backup" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.CopyBackupRequest.pb( - bigtable_table_admin.CopyBackupRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = json_format.MessageToJson(operations_pb2.Operation()) - req.return_value.content = return_value - - request = bigtable_table_admin.CopyBackupRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - post_with_metadata.return_value = operations_pb2.Operation(), metadata - - client.copy_backup( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test_get_iam_policy_rest_bad_request( - request_type=iam_policy_pb2.GetIamPolicyRequest, -): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.get_iam_policy(request) - - -@pytest.mark.parametrize( - "request_type", - [ - iam_policy_pb2.GetIamPolicyRequest, - dict, - ], -) -def test_get_iam_policy_rest_call_success(request_type): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy( - version=774, - etag=b"etag_blob", - ) - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.get_iam_policy(request) - - # Establish that the response is the type that we expect. - assert isinstance(response, policy_pb2.Policy) - assert response.version == 774 - assert response.etag == b"etag_blob" - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_iam_policy_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BaseBigtableTableAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_get_iam_policy" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, - "post_get_iam_policy_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_get_iam_policy" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = iam_policy_pb2.GetIamPolicyRequest() - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = json_format.MessageToJson(policy_pb2.Policy()) - req.return_value.content = return_value - - request = iam_policy_pb2.GetIamPolicyRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = policy_pb2.Policy() - post_with_metadata.return_value = policy_pb2.Policy(), metadata - - client.get_iam_policy( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test_set_iam_policy_rest_bad_request( - request_type=iam_policy_pb2.SetIamPolicyRequest, -): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.set_iam_policy(request) - - -@pytest.mark.parametrize( - "request_type", - [ - iam_policy_pb2.SetIamPolicyRequest, - dict, - ], -) -def test_set_iam_policy_rest_call_success(request_type): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy( - version=774, - etag=b"etag_blob", - ) - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.set_iam_policy(request) - - # Establish that the response is the type that we expect. - assert isinstance(response, policy_pb2.Policy) - assert response.version == 774 - assert response.etag == b"etag_blob" - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_set_iam_policy_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BaseBigtableTableAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_set_iam_policy" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, - "post_set_iam_policy_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_set_iam_policy" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = iam_policy_pb2.SetIamPolicyRequest() - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = json_format.MessageToJson(policy_pb2.Policy()) - req.return_value.content = return_value - - request = iam_policy_pb2.SetIamPolicyRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = policy_pb2.Policy() - post_with_metadata.return_value = policy_pb2.Policy(), metadata - - client.set_iam_policy( - request, - metadata=[ - ("key", "val"), - ("cephalopod", 
"squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test_test_iam_permissions_rest_bad_request( - request_type=iam_policy_pb2.TestIamPermissionsRequest, -): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.test_iam_permissions(request) - - -@pytest.mark.parametrize( - "request_type", - [ - iam_policy_pb2.TestIamPermissionsRequest, - dict, - ], -) -def test_test_iam_permissions_rest_call_success(request_type): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = iam_policy_pb2.TestIamPermissionsResponse( - permissions=["permissions_value"], - ) - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.test_iam_permissions(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) - assert response.permissions == ["permissions_value"] - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_test_iam_permissions_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BaseBigtableTableAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_test_iam_permissions" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, - "post_test_iam_permissions_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_test_iam_permissions" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = iam_policy_pb2.TestIamPermissionsRequest() - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = json_format.MessageToJson( - iam_policy_pb2.TestIamPermissionsResponse() - ) - req.return_value.content = return_value - - request = iam_policy_pb2.TestIamPermissionsRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = iam_policy_pb2.TestIamPermissionsResponse() - post_with_metadata.return_value = ( - iam_policy_pb2.TestIamPermissionsResponse(), - metadata, - ) - - client.test_iam_permissions( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test__create_schema_bundle_rest_bad_request( - request_type=bigtable_table_admin.CreateSchemaBundleRequest, -): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client._create_schema_bundle(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.CreateSchemaBundleRequest, - dict, - ], -) -def test__create_schema_bundle_rest_call_success(request_type): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"} - request_init["schema_bundle"] = { - "name": "name_value", - "proto_schema": {"proto_descriptors": b"proto_descriptors_blob"}, - "etag": "etag_value", - } - # The version of a generated dependency at test runtime may differ from the version used during generation. - # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 - - # Determine if the message type is proto-plus or protobuf - test_field = bigtable_table_admin.CreateSchemaBundleRequest.meta.fields[ - "schema_bundle" - ] - - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. - message_fields = [] - - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") - - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields - - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - - subfields_not_in_runtime = [] - - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["schema_bundle"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - { - "field": field, - "subfield": subfield, - "is_repeated": is_repeated, - } - ) - - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if 
field_repeated: - for i in range(0, len(request_init["schema_bundle"][field])): - del request_init["schema_bundle"][field][i][subfield] - else: - del request_init["schema_bundle"][field][subfield] - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client._create_schema_bundle(request) - - # Establish that the response is the type that we expect. - json_return_value = json_format.MessageToJson(return_value) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test__create_schema_bundle_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BaseBigtableTableAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_create_schema_bundle" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, - "post_create_schema_bundle_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_create_schema_bundle" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.CreateSchemaBundleRequest.pb( - bigtable_table_admin.CreateSchemaBundleRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = json_format.MessageToJson(operations_pb2.Operation()) - req.return_value.content = return_value - - request = bigtable_table_admin.CreateSchemaBundleRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - post_with_metadata.return_value = operations_pb2.Operation(), metadata - - client._create_schema_bundle( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test__update_schema_bundle_rest_bad_request( - request_type=bigtable_table_admin.UpdateSchemaBundleRequest, -): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = { - "schema_bundle": { - "name": "projects/sample1/instances/sample2/tables/sample3/schemaBundles/sample4" - } - } - request = 
request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client._update_schema_bundle(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.UpdateSchemaBundleRequest, - dict, - ], -) -def test__update_schema_bundle_rest_call_success(request_type): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = { - "schema_bundle": { - "name": "projects/sample1/instances/sample2/tables/sample3/schemaBundles/sample4" - } - } - request_init["schema_bundle"] = { - "name": "projects/sample1/instances/sample2/tables/sample3/schemaBundles/sample4", - "proto_schema": {"proto_descriptors": b"proto_descriptors_blob"}, - "etag": "etag_value", - } - # The version of a generated dependency at test runtime may differ from the version used during generation. - # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 - - # Determine if the message type is proto-plus or protobuf - test_field = bigtable_table_admin.UpdateSchemaBundleRequest.meta.fields[ - "schema_bundle" - ] - - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. 
- message_fields = [] - - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") - - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields - - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - - subfields_not_in_runtime = [] - - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["schema_bundle"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - { - "field": field, - "subfield": subfield, - "is_repeated": is_repeated, - } - ) - - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range(0, len(request_init["schema_bundle"][field])): - del request_init["schema_bundle"][field][i][subfield] - else: - del request_init["schema_bundle"][field][subfield] - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client._update_schema_bundle(request) - - # Establish that the response is the type that we expect. 
- json_return_value = json_format.MessageToJson(return_value) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test__update_schema_bundle_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BaseBigtableTableAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_update_schema_bundle" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, - "post_update_schema_bundle_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_update_schema_bundle" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.UpdateSchemaBundleRequest.pb( - bigtable_table_admin.UpdateSchemaBundleRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = json_format.MessageToJson(operations_pb2.Operation()) - req.return_value.content = return_value - - request = bigtable_table_admin.UpdateSchemaBundleRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - post_with_metadata.return_value = operations_pb2.Operation(), metadata - - client._update_schema_bundle( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test__get_schema_bundle_rest_bad_request( - request_type=bigtable_table_admin.GetSchemaBundleRequest, -): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/instances/sample2/tables/sample3/schemaBundles/sample4" - } - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client._get_schema_bundle(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.GetSchemaBundleRequest, - dict, - ], -) -def test__get_schema_bundle_rest_call_success(request_type): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/instances/sample2/tables/sample3/schemaBundles/sample4" - } - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = table.SchemaBundle( - name="name_value", - etag="etag_value", - ) - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = table.SchemaBundle.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client._get_schema_bundle(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, table.SchemaBundle) - assert response.name == "name_value" - assert response.etag == "etag_value" - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test__get_schema_bundle_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BaseBigtableTableAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_get_schema_bundle" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, - "post_get_schema_bundle_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_get_schema_bundle" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.GetSchemaBundleRequest.pb( - bigtable_table_admin.GetSchemaBundleRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = table.SchemaBundle.to_json(table.SchemaBundle()) - req.return_value.content = return_value - - request = bigtable_table_admin.GetSchemaBundleRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = table.SchemaBundle() - post_with_metadata.return_value = table.SchemaBundle(), metadata - - client._get_schema_bundle( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test__list_schema_bundles_rest_bad_request( - request_type=bigtable_table_admin.ListSchemaBundlesRequest, -): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client._list_schema_bundles(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.ListSchemaBundlesRequest, - dict, - ], -) -def test__list_schema_bundles_rest_call_success(request_type): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListSchemaBundlesResponse( - next_page_token="next_page_token_value", - ) - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_table_admin.ListSchemaBundlesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client._list_schema_bundles(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListSchemaBundlesPager) - assert response.next_page_token == "next_page_token_value" - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test__list_schema_bundles_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BaseBigtableTableAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_list_schema_bundles" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, - "post_list_schema_bundles_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_list_schema_bundles" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.ListSchemaBundlesRequest.pb( - bigtable_table_admin.ListSchemaBundlesRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = bigtable_table_admin.ListSchemaBundlesResponse.to_json( - bigtable_table_admin.ListSchemaBundlesResponse() - ) - req.return_value.content = return_value - - request = bigtable_table_admin.ListSchemaBundlesRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable_table_admin.ListSchemaBundlesResponse() - post_with_metadata.return_value = ( - bigtable_table_admin.ListSchemaBundlesResponse(), - metadata, - ) - - client._list_schema_bundles( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() - - -def test__delete_schema_bundle_rest_bad_request( - request_type=bigtable_table_admin.DeleteSchemaBundleRequest, -): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/instances/sample2/tables/sample3/schemaBundles/sample4" - } - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client._delete_schema_bundle(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.DeleteSchemaBundleRequest, - dict, - ], -) -def test__delete_schema_bundle_rest_call_success(request_type): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/instances/sample2/tables/sample3/schemaBundles/sample4" - } - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = None - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - json_return_value = "" - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client._delete_schema_bundle(request) - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test__delete_schema_bundle_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BaseBigtableTableAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_delete_schema_bundle" - ) as pre: - pre.assert_not_called() - pb_message = bigtable_table_admin.DeleteSchemaBundleRequest.pb( - bigtable_table_admin.DeleteSchemaBundleRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - request = bigtable_table_admin.DeleteSchemaBundleRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - - client._delete_schema_bundle( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - - -def test_initialize_client_w_rest(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - assert client is not None - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. 
-def test_create_table_empty_call_rest(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.create_table), "__call__") as call: - client.create_table(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.CreateTableRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_create_table_from_snapshot_empty_call_rest(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.create_table_from_snapshot), "__call__" - ) as call: - client.create_table_from_snapshot(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.CreateTableFromSnapshotRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_list_tables_empty_call_rest(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.list_tables), "__call__") as call: - client.list_tables(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.ListTablesRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_get_table_empty_call_rest(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.get_table), "__call__") as call: - client.get_table(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.GetTableRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_update_table_empty_call_rest(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.update_table), "__call__") as call: - client.update_table(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.UpdateTableRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. 
-def test_delete_table_empty_call_rest(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.delete_table), "__call__") as call: - client.delete_table(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.DeleteTableRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_undelete_table_empty_call_rest(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.undelete_table), "__call__") as call: - client.undelete_table(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.UndeleteTableRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_create_authorized_view_empty_call_rest(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.create_authorized_view), "__call__" - ) as call: - client.create_authorized_view(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.CreateAuthorizedViewRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_list_authorized_views_empty_call_rest(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.list_authorized_views), "__call__" - ) as call: - client.list_authorized_views(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.ListAuthorizedViewsRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_get_authorized_view_empty_call_rest(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.get_authorized_view), "__call__" - ) as call: - client.get_authorized_view(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.GetAuthorizedViewRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. 
-def test_update_authorized_view_empty_call_rest(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.update_authorized_view), "__call__" - ) as call: - client.update_authorized_view(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.UpdateAuthorizedViewRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_delete_authorized_view_empty_call_rest(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.delete_authorized_view), "__call__" - ) as call: - client.delete_authorized_view(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.DeleteAuthorizedViewRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test__modify_column_families_empty_call_rest(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.modify_column_families), "__call__" - ) as call: - client._modify_column_families(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.ModifyColumnFamiliesRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_drop_row_range_empty_call_rest(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: - client.drop_row_range(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.DropRowRangeRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_generate_consistency_token_empty_call_rest(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.generate_consistency_token), "__call__" - ) as call: - client.generate_consistency_token(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.GenerateConsistencyTokenRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. 
request == None and no flattened fields passed, work. -def test_check_consistency_empty_call_rest(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.check_consistency), "__call__" - ) as call: - client.check_consistency(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.CheckConsistencyRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test__snapshot_table_empty_call_rest(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: - client._snapshot_table(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.SnapshotTableRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test__get_snapshot_empty_call_rest(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: - client._get_snapshot(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.GetSnapshotRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test__list_snapshots_empty_call_rest(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: - client._list_snapshots(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.ListSnapshotsRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test__delete_snapshot_empty_call_rest(): - client = BaseBigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: - client._delete_snapshot(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.DeleteSnapshotRequest() - - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. 
-def test_create_backup_empty_call_rest():
-    client = BaseBigtableTableAdminClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport="rest",
-    )
-
-    # Mock the actual call, and fake the request.
-    with mock.patch.object(type(client.transport.create_backup), "__call__") as call:
-        client.create_backup(request=None)
-
-    # Establish that the underlying stub method was called.
-    call.assert_called()
-    _, args, _ = call.mock_calls[0]
-    request_msg = bigtable_table_admin.CreateBackupRequest()
-
-    assert args[0] == request_msg
-
-
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test_get_backup_empty_call_rest():
-    client = BaseBigtableTableAdminClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport="rest",
-    )
-
-    # Mock the actual call, and fake the request.
-    with mock.patch.object(type(client.transport.get_backup), "__call__") as call:
-        client.get_backup(request=None)
-
-    # Establish that the underlying stub method was called.
-    call.assert_called()
-    _, args, _ = call.mock_calls[0]
-    request_msg = bigtable_table_admin.GetBackupRequest()
-
-    assert args[0] == request_msg
-
-
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test_update_backup_empty_call_rest():
-    client = BaseBigtableTableAdminClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport="rest",
-    )
-
-    # Mock the actual call, and fake the request.
-    with mock.patch.object(type(client.transport.update_backup), "__call__") as call:
-        client.update_backup(request=None)
-
-    # Establish that the underlying stub method was called.
-    call.assert_called()
-    _, args, _ = call.mock_calls[0]
-    request_msg = bigtable_table_admin.UpdateBackupRequest()
-
-    assert args[0] == request_msg
-
-
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test_delete_backup_empty_call_rest():
-    client = BaseBigtableTableAdminClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport="rest",
-    )
-
-    # Mock the actual call, and fake the request.
-    with mock.patch.object(type(client.transport.delete_backup), "__call__") as call:
-        client.delete_backup(request=None)
-
-    # Establish that the underlying stub method was called.
-    call.assert_called()
-    _, args, _ = call.mock_calls[0]
-    request_msg = bigtable_table_admin.DeleteBackupRequest()
-
-    assert args[0] == request_msg
-
-
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test_list_backups_empty_call_rest():
-    client = BaseBigtableTableAdminClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport="rest",
-    )
-
-    # Mock the actual call, and fake the request.
-    with mock.patch.object(type(client.transport.list_backups), "__call__") as call:
-        client.list_backups(request=None)
-
-    # Establish that the underlying stub method was called.
-    call.assert_called()
-    _, args, _ = call.mock_calls[0]
-    request_msg = bigtable_table_admin.ListBackupsRequest()
-
-    assert args[0] == request_msg
-
-
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test__restore_table_empty_call_rest():
-    client = BaseBigtableTableAdminClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport="rest",
-    )
-
-    # Mock the actual call, and fake the request.
-    with mock.patch.object(type(client.transport.restore_table), "__call__") as call:
-        client._restore_table(request=None)
-
-    # Establish that the underlying stub method was called.
-    call.assert_called()
-    _, args, _ = call.mock_calls[0]
-    request_msg = bigtable_table_admin.RestoreTableRequest()
-
-    assert args[0] == request_msg
-
-
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test_copy_backup_empty_call_rest():
-    client = BaseBigtableTableAdminClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport="rest",
-    )
-
-    # Mock the actual call, and fake the request.
-    with mock.patch.object(type(client.transport.copy_backup), "__call__") as call:
-        client.copy_backup(request=None)
-
-    # Establish that the underlying stub method was called.
-    call.assert_called()
-    _, args, _ = call.mock_calls[0]
-    request_msg = bigtable_table_admin.CopyBackupRequest()
-
-    assert args[0] == request_msg
-
-
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test_get_iam_policy_empty_call_rest():
-    client = BaseBigtableTableAdminClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport="rest",
-    )
-
-    # Mock the actual call, and fake the request.
-    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
-        client.get_iam_policy(request=None)
-
-    # Establish that the underlying stub method was called.
-    call.assert_called()
-    _, args, _ = call.mock_calls[0]
-    request_msg = iam_policy_pb2.GetIamPolicyRequest()
-
-    assert args[0] == request_msg
-
-
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test_set_iam_policy_empty_call_rest():
-    client = BaseBigtableTableAdminClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport="rest",
-    )
-
-    # Mock the actual call, and fake the request.
-    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
-        client.set_iam_policy(request=None)
-
-    # Establish that the underlying stub method was called.
-    call.assert_called()
-    _, args, _ = call.mock_calls[0]
-    request_msg = iam_policy_pb2.SetIamPolicyRequest()
-
-    assert args[0] == request_msg
-
-
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test_test_iam_permissions_empty_call_rest():
-    client = BaseBigtableTableAdminClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport="rest",
-    )
-
-    # Mock the actual call, and fake the request.
-    with mock.patch.object(
-        type(client.transport.test_iam_permissions), "__call__"
-    ) as call:
-        client.test_iam_permissions(request=None)
-
-    # Establish that the underlying stub method was called.
-    call.assert_called()
-    _, args, _ = call.mock_calls[0]
-    request_msg = iam_policy_pb2.TestIamPermissionsRequest()
-
-    assert args[0] == request_msg
-
-
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test__create_schema_bundle_empty_call_rest():
-    client = BaseBigtableTableAdminClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport="rest",
-    )
-
-    # Mock the actual call, and fake the request.
-    with mock.patch.object(
-        type(client.transport.create_schema_bundle), "__call__"
-    ) as call:
-        client._create_schema_bundle(request=None)
-
-    # Establish that the underlying stub method was called.
-    call.assert_called()
-    _, args, _ = call.mock_calls[0]
-    request_msg = bigtable_table_admin.CreateSchemaBundleRequest()
-
-    assert args[0] == request_msg
-
-
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test__update_schema_bundle_empty_call_rest():
-    client = BaseBigtableTableAdminClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport="rest",
-    )
-
-    # Mock the actual call, and fake the request.
-    with mock.patch.object(
-        type(client.transport.update_schema_bundle), "__call__"
-    ) as call:
-        client._update_schema_bundle(request=None)
-
-    # Establish that the underlying stub method was called.
-    call.assert_called()
-    _, args, _ = call.mock_calls[0]
-    request_msg = bigtable_table_admin.UpdateSchemaBundleRequest()
-
-    assert args[0] == request_msg
-
-
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test__get_schema_bundle_empty_call_rest():
-    client = BaseBigtableTableAdminClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport="rest",
-    )
-
-    # Mock the actual call, and fake the request.
-    with mock.patch.object(
-        type(client.transport.get_schema_bundle), "__call__"
-    ) as call:
-        client._get_schema_bundle(request=None)
-
-    # Establish that the underlying stub method was called.
-    call.assert_called()
-    _, args, _ = call.mock_calls[0]
-    request_msg = bigtable_table_admin.GetSchemaBundleRequest()
-
-    assert args[0] == request_msg
-
-
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test__list_schema_bundles_empty_call_rest():
-    client = BaseBigtableTableAdminClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport="rest",
-    )
-
-    # Mock the actual call, and fake the request.
-    with mock.patch.object(
-        type(client.transport.list_schema_bundles), "__call__"
-    ) as call:
-        client._list_schema_bundles(request=None)
-
-    # Establish that the underlying stub method was called.
-    call.assert_called()
-    _, args, _ = call.mock_calls[0]
-    request_msg = bigtable_table_admin.ListSchemaBundlesRequest()
-
-    assert args[0] == request_msg
-
-
-# This test is a coverage failsafe to make sure that totally empty calls,
-# i.e. request == None and no flattened fields passed, work.
-def test__delete_schema_bundle_empty_call_rest():
-    client = BaseBigtableTableAdminClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport="rest",
-    )
-
-    # Mock the actual call, and fake the request.
-    with mock.patch.object(
-        type(client.transport.delete_schema_bundle), "__call__"
-    ) as call:
-        client._delete_schema_bundle(request=None)
-
-    # Establish that the underlying stub method was called.
-    call.assert_called()
-    _, args, _ = call.mock_calls[0]
-    request_msg = bigtable_table_admin.DeleteSchemaBundleRequest()
-
-    assert args[0] == request_msg
-
-
-def test_bigtable_table_admin_rest_lro_client():
-    client = BaseBigtableTableAdminClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport="rest",
-    )
-    transport = client.transport
-
-    # Ensure that we have an api-core operations client.
-    assert isinstance(
-        transport.operations_client,
-        operations_v1.AbstractOperationsClient,
-    )
-
-    # Ensure that subsequent calls to the property send the exact same object.
-    assert transport.operations_client is transport.operations_client
-
-
-def test_transport_grpc_default():
-    # A client should use the gRPC transport by default.
-    client = BaseBigtableTableAdminClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-    assert isinstance(
-        client.transport,
-        transports.BigtableTableAdminGrpcTransport,
-    )
-
-
-def test_bigtable_table_admin_base_transport_error():
-    # Passing both a credentials object and credentials_file should raise an error
-    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
-        transport = transports.BigtableTableAdminTransport(
-            credentials=ga_credentials.AnonymousCredentials(),
-            credentials_file="credentials.json",
-        )
-
-
-def test_bigtable_table_admin_base_transport():
-    # Instantiate the base transport.
-    with mock.patch(
-        "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.BigtableTableAdminTransport.__init__"
-    ) as Transport:
-        Transport.return_value = None
-        transport = transports.BigtableTableAdminTransport(
-            credentials=ga_credentials.AnonymousCredentials(),
-        )
-
-    # Every method on the transport should just blindly
-    # raise NotImplementedError.
-    methods = (
-        "create_table",
-        "create_table_from_snapshot",
-        "list_tables",
-        "get_table",
-        "update_table",
-        "delete_table",
-        "undelete_table",
-        "create_authorized_view",
-        "list_authorized_views",
-        "get_authorized_view",
-        "update_authorized_view",
-        "delete_authorized_view",
-        "modify_column_families",
-        "drop_row_range",
-        "generate_consistency_token",
-        "check_consistency",
-        "snapshot_table",
-        "get_snapshot",
-        "list_snapshots",
-        "delete_snapshot",
-        "create_backup",
-        "get_backup",
-        "update_backup",
-        "delete_backup",
-        "list_backups",
-        "restore_table",
-        "copy_backup",
-        "get_iam_policy",
-        "set_iam_policy",
-        "test_iam_permissions",
-        "create_schema_bundle",
-        "update_schema_bundle",
-        "get_schema_bundle",
-        "list_schema_bundles",
-        "delete_schema_bundle",
-    )
-    for method in methods:
-        with pytest.raises(NotImplementedError):
-            getattr(transport, method)(request=object())
-
-    with pytest.raises(NotImplementedError):
-        transport.close()
-
-    # Additionally, the LRO client (a property) should
-    # also raise NotImplementedError
-    with pytest.raises(NotImplementedError):
-        transport.operations_client
-
-    # Catch all for all remaining methods and properties
-    remainder = [
-        "kind",
-    ]
-    for r in remainder:
-        with pytest.raises(NotImplementedError):
-            getattr(transport, r)()
-
-
-def test_bigtable_table_admin_base_transport_with_credentials_file():
-    # Instantiate the base transport with a credentials file
-    with mock.patch.object(
-        google.auth, "load_credentials_from_file", autospec=True
-    ) as load_creds, mock.patch(
-        "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.BigtableTableAdminTransport._prep_wrapped_messages"
-    ) as Transport:
-        Transport.return_value = None
-        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
-        transport = transports.BigtableTableAdminTransport(
-            credentials_file="credentials.json",
-            quota_project_id="octopus",
-        )
-        load_creds.assert_called_once_with(
-            "credentials.json",
-            scopes=None,
-            default_scopes=(
-                "https://www.googleapis.com/auth/bigtable.admin",
-                "https://www.googleapis.com/auth/bigtable.admin.table",
-                "https://www.googleapis.com/auth/cloud-bigtable.admin",
-                "https://www.googleapis.com/auth/cloud-bigtable.admin.table",
-                "https://www.googleapis.com/auth/cloud-platform",
-                "https://www.googleapis.com/auth/cloud-platform.read-only",
-            ),
-            quota_project_id="octopus",
-        )
-
-
-def test_bigtable_table_admin_base_transport_with_adc():
-    # Test the default credentials are used if credentials and credentials_file are None.
-    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
-        "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.BigtableTableAdminTransport._prep_wrapped_messages"
-    ) as Transport:
-        Transport.return_value = None
-        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
-        transport = transports.BigtableTableAdminTransport()
-        adc.assert_called_once()
-
-
-def test_bigtable_table_admin_auth_adc():
-    # If no credentials are provided, we should use ADC credentials.
-    with mock.patch.object(google.auth, "default", autospec=True) as adc:
-        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
-        BaseBigtableTableAdminClient()
-        adc.assert_called_once_with(
-            scopes=None,
-            default_scopes=(
-                "https://www.googleapis.com/auth/bigtable.admin",
-                "https://www.googleapis.com/auth/bigtable.admin.table",
-                "https://www.googleapis.com/auth/cloud-bigtable.admin",
-                "https://www.googleapis.com/auth/cloud-bigtable.admin.table",
-                "https://www.googleapis.com/auth/cloud-platform",
-                "https://www.googleapis.com/auth/cloud-platform.read-only",
-            ),
-            quota_project_id=None,
-        )
-
-
-@pytest.mark.parametrize(
-    "transport_class",
-    [
-        transports.BigtableTableAdminGrpcTransport,
-        transports.BigtableTableAdminGrpcAsyncIOTransport,
-    ],
-)
-def test_bigtable_table_admin_transport_auth_adc(transport_class):
-    # If credentials and host are not provided, the transport class should use
-    # ADC credentials.
-    with mock.patch.object(google.auth, "default", autospec=True) as adc:
-        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
-        transport_class(quota_project_id="octopus", scopes=["1", "2"])
-        adc.assert_called_once_with(
-            scopes=["1", "2"],
-            default_scopes=(
-                "https://www.googleapis.com/auth/bigtable.admin",
-                "https://www.googleapis.com/auth/bigtable.admin.table",
-                "https://www.googleapis.com/auth/cloud-bigtable.admin",
-                "https://www.googleapis.com/auth/cloud-bigtable.admin.table",
-                "https://www.googleapis.com/auth/cloud-platform",
-                "https://www.googleapis.com/auth/cloud-platform.read-only",
-            ),
-            quota_project_id="octopus",
-        )
-
-
-@pytest.mark.parametrize(
-    "transport_class",
-    [
-        transports.BigtableTableAdminGrpcTransport,
-        transports.BigtableTableAdminGrpcAsyncIOTransport,
-        transports.BigtableTableAdminRestTransport,
-    ],
-)
-def test_bigtable_table_admin_transport_auth_gdch_credentials(transport_class):
-    host = "https://language.com"
-    api_audience_tests = [None, "https://language2.com"]
-    api_audience_expect = [host, "https://language2.com"]
-    for t, e in zip(api_audience_tests, api_audience_expect):
-        with mock.patch.object(google.auth, "default", autospec=True) as adc:
-            gdch_mock = mock.MagicMock()
-            type(gdch_mock).with_gdch_audience = mock.PropertyMock(
-                return_value=gdch_mock
-            )
-            adc.return_value = (gdch_mock, None)
-            transport_class(host=host, api_audience=t)
-            gdch_mock.with_gdch_audience.assert_called_once_with(e)
-
-
-@pytest.mark.parametrize(
-    "transport_class,grpc_helpers",
-    [
-        (transports.BigtableTableAdminGrpcTransport, grpc_helpers),
-        (transports.BigtableTableAdminGrpcAsyncIOTransport, grpc_helpers_async),
-    ],
-)
-def test_bigtable_table_admin_transport_create_channel(transport_class, grpc_helpers):
-    # If credentials and host are not provided, the transport class should use
-    # ADC credentials.
-    with mock.patch.object(
-        google.auth, "default", autospec=True
-    ) as adc, mock.patch.object(
-        grpc_helpers, "create_channel", autospec=True
-    ) as create_channel:
-        creds = ga_credentials.AnonymousCredentials()
-        adc.return_value = (creds, None)
-        transport_class(quota_project_id="octopus", scopes=["1", "2"])
-
-        create_channel.assert_called_with(
-            "bigtableadmin.googleapis.com:443",
-            credentials=creds,
-            credentials_file=None,
-            quota_project_id="octopus",
-            default_scopes=(
-                "https://www.googleapis.com/auth/bigtable.admin",
-                "https://www.googleapis.com/auth/bigtable.admin.table",
-                "https://www.googleapis.com/auth/cloud-bigtable.admin",
-                "https://www.googleapis.com/auth/cloud-bigtable.admin.table",
-                "https://www.googleapis.com/auth/cloud-platform",
-                "https://www.googleapis.com/auth/cloud-platform.read-only",
-            ),
-            scopes=["1", "2"],
-            default_host="bigtableadmin.googleapis.com",
-            ssl_credentials=None,
-            options=[
-                ("grpc.max_send_message_length", -1),
-                ("grpc.max_receive_message_length", -1),
-            ],
-        )
-
-
-@pytest.mark.parametrize(
-    "transport_class",
-    [
-        transports.BigtableTableAdminGrpcTransport,
-        transports.BigtableTableAdminGrpcAsyncIOTransport,
-    ],
-)
-def test_bigtable_table_admin_grpc_transport_client_cert_source_for_mtls(
-    transport_class,
-):
-    cred = ga_credentials.AnonymousCredentials()
-
-    # Check ssl_channel_credentials is used if provided.
-    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
-        mock_ssl_channel_creds = mock.Mock()
-        transport_class(
-            host="squid.clam.whelk",
-            credentials=cred,
-            ssl_channel_credentials=mock_ssl_channel_creds,
-        )
-        mock_create_channel.assert_called_once_with(
-            "squid.clam.whelk:443",
-            credentials=cred,
-            credentials_file=None,
-            scopes=None,
-            ssl_credentials=mock_ssl_channel_creds,
-            quota_project_id=None,
-            options=[
-                ("grpc.max_send_message_length", -1),
-                ("grpc.max_receive_message_length", -1),
-            ],
-        )
-
-    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
-    # is used.
-    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
-        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
-            transport_class(
-                credentials=cred,
-                client_cert_source_for_mtls=client_cert_source_callback,
-            )
-            expected_cert, expected_key = client_cert_source_callback()
-            mock_ssl_cred.assert_called_once_with(
-                certificate_chain=expected_cert, private_key=expected_key
-            )
-
-
-def test_bigtable_table_admin_http_transport_client_cert_source_for_mtls():
-    cred = ga_credentials.AnonymousCredentials()
-    with mock.patch(
-        "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"
-    ) as mock_configure_mtls_channel:
-        transports.BigtableTableAdminRestTransport(
-            credentials=cred, client_cert_source_for_mtls=client_cert_source_callback
-        )
-        mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback)
-
-
-@pytest.mark.parametrize(
-    "transport_name",
-    [
-        "grpc",
-        "grpc_asyncio",
-        "rest",
-    ],
-)
-def test_bigtable_table_admin_host_no_port(transport_name):
-    client = BaseBigtableTableAdminClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        client_options=client_options.ClientOptions(
-            api_endpoint="bigtableadmin.googleapis.com"
-        ),
-        transport=transport_name,
-    )
-    assert client.transport._host == (
-        "bigtableadmin.googleapis.com:443"
-        if transport_name in ["grpc", "grpc_asyncio"]
-        else "https://bigtableadmin.googleapis.com"
-    )
-
-
-@pytest.mark.parametrize(
-    "transport_name",
-    [
-        "grpc",
-        "grpc_asyncio",
-        "rest",
-    ],
-)
-def test_bigtable_table_admin_host_with_port(transport_name):
-    client = BaseBigtableTableAdminClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        client_options=client_options.ClientOptions(
-            api_endpoint="bigtableadmin.googleapis.com:8000"
-        ),
-        transport=transport_name,
-    )
-    assert client.transport._host == (
-        "bigtableadmin.googleapis.com:8000"
-        if transport_name in ["grpc", "grpc_asyncio"]
-        else "https://bigtableadmin.googleapis.com:8000"
-    )
-
-
-@pytest.mark.parametrize(
-    "transport_name",
-    [
-        "rest",
-    ],
-)
-def test_bigtable_table_admin_client_transport_session_collision(transport_name):
-    creds1 = ga_credentials.AnonymousCredentials()
-    creds2 = ga_credentials.AnonymousCredentials()
-    client1 = BaseBigtableTableAdminClient(
-        credentials=creds1,
-        transport=transport_name,
-    )
-    client2 = BaseBigtableTableAdminClient(
-        credentials=creds2,
-        transport=transport_name,
-    )
-    session1 = client1.transport.create_table._session
-    session2 = client2.transport.create_table._session
-    assert session1 != session2
-    session1 = client1.transport.create_table_from_snapshot._session
-    session2 = client2.transport.create_table_from_snapshot._session
-    assert session1 != session2
-    session1 = client1.transport.list_tables._session
-    session2 = client2.transport.list_tables._session
-    assert session1 != session2
-    session1 = client1.transport.get_table._session
-    session2 = client2.transport.get_table._session
-    assert session1 != session2
-    session1 = client1.transport.update_table._session
-    session2 = client2.transport.update_table._session
-    assert session1 != session2
-    session1 = client1.transport.delete_table._session
-    session2 = client2.transport.delete_table._session
-    assert session1 != session2
-    session1 = client1.transport.undelete_table._session
-    session2 = client2.transport.undelete_table._session
-    assert session1 != session2
-    session1 = client1.transport.create_authorized_view._session
-    session2 = client2.transport.create_authorized_view._session
-    assert session1 != session2
-    session1 = client1.transport.list_authorized_views._session
-    session2 = client2.transport.list_authorized_views._session
-    assert session1 != session2
-    session1 = client1.transport.get_authorized_view._session
-    session2 = client2.transport.get_authorized_view._session
-    assert session1 != session2
-    session1 = client1.transport.update_authorized_view._session
-    session2 = client2.transport.update_authorized_view._session
-    assert session1 != session2
-    session1 = client1.transport.delete_authorized_view._session
-    session2 = client2.transport.delete_authorized_view._session
-    assert session1 != session2
-    session1 = client1.transport.modify_column_families._session
-    session2 = client2.transport.modify_column_families._session
-    assert session1 != session2
-    session1 = client1.transport.drop_row_range._session
-    session2 = client2.transport.drop_row_range._session
-    assert session1 != session2
-    session1 = client1.transport.generate_consistency_token._session
-    session2 = client2.transport.generate_consistency_token._session
-    assert session1 != session2
-    session1 = client1.transport.check_consistency._session
-    session2 = client2.transport.check_consistency._session
-    assert session1 != session2
-    session1 = client1.transport.snapshot_table._session
-    session2 = client2.transport.snapshot_table._session
-    assert session1 != session2
-    session1 = client1.transport.get_snapshot._session
-    session2 = client2.transport.get_snapshot._session
-    assert session1 != session2
-    session1 = client1.transport.list_snapshots._session
-    session2 = client2.transport.list_snapshots._session
-    assert session1 != session2
-    session1 = client1.transport.delete_snapshot._session
-    session2 = client2.transport.delete_snapshot._session
-    assert session1 != session2
-    session1 = client1.transport.create_backup._session
-    session2 = client2.transport.create_backup._session
-    assert session1 != session2
-    session1 = client1.transport.get_backup._session
-    session2 = client2.transport.get_backup._session
-    assert session1 != session2
-    session1 = client1.transport.update_backup._session
-    session2 = client2.transport.update_backup._session
-    assert session1 != session2
-    session1 = client1.transport.delete_backup._session
-    session2 = client2.transport.delete_backup._session
-    assert session1 != session2
-    session1 = client1.transport.list_backups._session
-    session2 = client2.transport.list_backups._session
-    assert session1 != session2
-    session1 = client1.transport.restore_table._session
-    session2 = client2.transport.restore_table._session
-    assert session1 != session2
-    session1 = client1.transport.copy_backup._session
-    session2 = client2.transport.copy_backup._session
-    assert session1 != session2
-    session1 = client1.transport.get_iam_policy._session
-    session2 = client2.transport.get_iam_policy._session
-    assert session1 != session2
-    session1 = client1.transport.set_iam_policy._session
-    session2 = client2.transport.set_iam_policy._session
-    assert session1 != session2
-    session1 = client1.transport.test_iam_permissions._session
-    session2 = client2.transport.test_iam_permissions._session
-    assert session1 != session2
-    session1 = client1.transport.create_schema_bundle._session
-    session2 = client2.transport.create_schema_bundle._session
-    assert session1 != session2
-    session1 = client1.transport.update_schema_bundle._session
-    session2 = client2.transport.update_schema_bundle._session
-    assert session1 != session2
-    session1 = client1.transport.get_schema_bundle._session
-    session2 = client2.transport.get_schema_bundle._session
-    assert session1 != session2
-    session1 = client1.transport.list_schema_bundles._session
-    session2 = client2.transport.list_schema_bundles._session
-    assert session1 != session2
-    session1 = client1.transport.delete_schema_bundle._session
-    session2 = client2.transport.delete_schema_bundle._session
-    assert session1 != session2
-
-
-def test_bigtable_table_admin_grpc_transport_channel():
-    channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
-
-    # Check that channel is used if provided.
-    transport = transports.BigtableTableAdminGrpcTransport(
-        host="squid.clam.whelk",
-        channel=channel,
-    )
-    assert transport.grpc_channel == channel
-    assert transport._host == "squid.clam.whelk:443"
-    assert transport._ssl_channel_credentials == None
-
-
-def test_bigtable_table_admin_grpc_asyncio_transport_channel():
-    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
-
-    # Check that channel is used if provided.
-    transport = transports.BigtableTableAdminGrpcAsyncIOTransport(
-        host="squid.clam.whelk",
-        channel=channel,
-    )
-    assert transport.grpc_channel == channel
-    assert transport._host == "squid.clam.whelk:443"
-    assert transport._ssl_channel_credentials == None
-
-
-# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
-# removed from grpc/grpc_asyncio transport constructor.
-@pytest.mark.parametrize(
-    "transport_class",
-    [
-        transports.BigtableTableAdminGrpcTransport,
-        transports.BigtableTableAdminGrpcAsyncIOTransport,
-    ],
-)
-def test_bigtable_table_admin_transport_channel_mtls_with_client_cert_source(
-    transport_class,
-):
-    with mock.patch(
-        "grpc.ssl_channel_credentials", autospec=True
-    ) as grpc_ssl_channel_cred:
-        with mock.patch.object(
-            transport_class, "create_channel"
-        ) as grpc_create_channel:
-            mock_ssl_cred = mock.Mock()
-            grpc_ssl_channel_cred.return_value = mock_ssl_cred
-
-            mock_grpc_channel = mock.Mock()
-            grpc_create_channel.return_value = mock_grpc_channel
-
-            cred = ga_credentials.AnonymousCredentials()
-            with pytest.warns(DeprecationWarning):
-                with mock.patch.object(google.auth, "default") as adc:
-                    adc.return_value = (cred, None)
-                    transport = transport_class(
-                        host="squid.clam.whelk",
-                        api_mtls_endpoint="mtls.squid.clam.whelk",
-                        client_cert_source=client_cert_source_callback,
-                    )
-                    adc.assert_called_once()
-
-            grpc_ssl_channel_cred.assert_called_once_with(
-                certificate_chain=b"cert bytes", private_key=b"key bytes"
-            )
-            grpc_create_channel.assert_called_once_with(
-                "mtls.squid.clam.whelk:443",
-                credentials=cred,
-                credentials_file=None,
-                scopes=None,
-                ssl_credentials=mock_ssl_cred,
-                quota_project_id=None,
-                options=[
-                    ("grpc.max_send_message_length", -1),
-                    ("grpc.max_receive_message_length", -1),
-                ],
-            )
-            assert transport.grpc_channel == mock_grpc_channel
-            assert transport._ssl_channel_credentials == mock_ssl_cred
-
-
-# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
-# removed from grpc/grpc_asyncio transport constructor.
-@pytest.mark.parametrize(
-    "transport_class",
-    [
-        transports.BigtableTableAdminGrpcTransport,
-        transports.BigtableTableAdminGrpcAsyncIOTransport,
-    ],
-)
-def test_bigtable_table_admin_transport_channel_mtls_with_adc(transport_class):
-    mock_ssl_cred = mock.Mock()
-    with mock.patch.multiple(
-        "google.auth.transport.grpc.SslCredentials",
-        __init__=mock.Mock(return_value=None),
-        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
-    ):
-        with mock.patch.object(
-            transport_class, "create_channel"
-        ) as grpc_create_channel:
-            mock_grpc_channel = mock.Mock()
-            grpc_create_channel.return_value = mock_grpc_channel
-            mock_cred = mock.Mock()
-
-            with pytest.warns(DeprecationWarning):
-                transport = transport_class(
-                    host="squid.clam.whelk",
-                    credentials=mock_cred,
-                    api_mtls_endpoint="mtls.squid.clam.whelk",
-                    client_cert_source=None,
-                )
-
-            grpc_create_channel.assert_called_once_with(
-                "mtls.squid.clam.whelk:443",
-                credentials=mock_cred,
-                credentials_file=None,
-                scopes=None,
-                ssl_credentials=mock_ssl_cred,
-                quota_project_id=None,
-                options=[
-                    ("grpc.max_send_message_length", -1),
-                    ("grpc.max_receive_message_length", -1),
-                ],
-            )
-            assert transport.grpc_channel == mock_grpc_channel
-
-
-def test_bigtable_table_admin_grpc_lro_client():
-    client = BaseBigtableTableAdminClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport="grpc",
-    )
-    transport = client.transport
-
-    # Ensure that we have a api-core operations client.
-    assert isinstance(
-        transport.operations_client,
-        operations_v1.OperationsClient,
-    )
-
-    # Ensure that subsequent calls to the property send the exact same object.
-    assert transport.operations_client is transport.operations_client
-
-
-def test_bigtable_table_admin_grpc_lro_async_client():
-    client = BaseBigtableTableAdminAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport="grpc_asyncio",
-    )
-    transport = client.transport
-
-    # Ensure that we have a api-core operations client.
-    assert isinstance(
-        transport.operations_client,
-        operations_v1.OperationsAsyncClient,
-    )
-
-    # Ensure that subsequent calls to the property send the exact same object.
-    assert transport.operations_client is transport.operations_client
-
-
-def test_authorized_view_path():
-    project = "squid"
-    instance = "clam"
-    table = "whelk"
-    authorized_view = "octopus"
-    expected = "projects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}".format(
-        project=project,
-        instance=instance,
-        table=table,
-        authorized_view=authorized_view,
-    )
-    actual = BaseBigtableTableAdminClient.authorized_view_path(
-        project, instance, table, authorized_view
-    )
-    assert expected == actual
-
-
-def test_parse_authorized_view_path():
-    expected = {
-        "project": "oyster",
-        "instance": "nudibranch",
-        "table": "cuttlefish",
-        "authorized_view": "mussel",
-    }
-    path = BaseBigtableTableAdminClient.authorized_view_path(**expected)
-
-    # Check that the path construction is reversible.
-    actual = BaseBigtableTableAdminClient.parse_authorized_view_path(path)
-    assert expected == actual
-
-
-def test_backup_path():
-    project = "winkle"
-    instance = "nautilus"
-    cluster = "scallop"
-    backup = "abalone"
-    expected = "projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}".format(
-        project=project,
-        instance=instance,
-        cluster=cluster,
-        backup=backup,
-    )
-    actual = BaseBigtableTableAdminClient.backup_path(
-        project, instance, cluster, backup
-    )
-    assert expected == actual
-
-
-def test_parse_backup_path():
-    expected = {
-        "project": "squid",
-        "instance": "clam",
-        "cluster": "whelk",
-        "backup": "octopus",
-    }
-    path = BaseBigtableTableAdminClient.backup_path(**expected)
-
-    # Check that the path construction is reversible.
-    actual = BaseBigtableTableAdminClient.parse_backup_path(path)
-    assert expected == actual
-
-
-def test_cluster_path():
-    project = "oyster"
-    instance = "nudibranch"
-    cluster = "cuttlefish"
-    expected = "projects/{project}/instances/{instance}/clusters/{cluster}".format(
-        project=project,
-        instance=instance,
-        cluster=cluster,
-    )
-    actual = BaseBigtableTableAdminClient.cluster_path(project, instance, cluster)
-    assert expected == actual
-
-
-def test_parse_cluster_path():
-    expected = {
-        "project": "mussel",
-        "instance": "winkle",
-        "cluster": "nautilus",
-    }
-    path = BaseBigtableTableAdminClient.cluster_path(**expected)
-
-    # Check that the path construction is reversible.
-    actual = BaseBigtableTableAdminClient.parse_cluster_path(path)
-    assert expected == actual
-
-
-def test_crypto_key_version_path():
-    project = "scallop"
-    location = "abalone"
-    key_ring = "squid"
-    crypto_key = "clam"
-    crypto_key_version = "whelk"
-    expected = "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}/cryptoKeyVersions/{crypto_key_version}".format(
-        project=project,
-        location=location,
-        key_ring=key_ring,
-        crypto_key=crypto_key,
-        crypto_key_version=crypto_key_version,
-    )
-    actual = BaseBigtableTableAdminClient.crypto_key_version_path(
-        project, location, key_ring, crypto_key, crypto_key_version
-    )
-    assert expected == actual
-
-
-def test_parse_crypto_key_version_path():
-    expected = {
-        "project": "octopus",
-        "location": "oyster",
-        "key_ring": "nudibranch",
-        "crypto_key": "cuttlefish",
-        "crypto_key_version": "mussel",
-    }
-    path = BaseBigtableTableAdminClient.crypto_key_version_path(**expected)
-
-    # Check that the path construction is reversible.
-    actual = BaseBigtableTableAdminClient.parse_crypto_key_version_path(path)
-    assert expected == actual
-
-
-def test_instance_path():
-    project = "winkle"
-    instance = "nautilus"
-    expected = "projects/{project}/instances/{instance}".format(
-        project=project,
-        instance=instance,
-    )
-    actual = BaseBigtableTableAdminClient.instance_path(project, instance)
-    assert expected == actual
-
-
-def test_parse_instance_path():
-    expected = {
-        "project": "scallop",
-        "instance": "abalone",
-    }
-    path = BaseBigtableTableAdminClient.instance_path(**expected)
-
-    # Check that the path construction is reversible.
-    actual = BaseBigtableTableAdminClient.parse_instance_path(path)
-    assert expected == actual
-
-
-def test_schema_bundle_path():
-    project = "squid"
-    instance = "clam"
-    table = "whelk"
-    schema_bundle = "octopus"
-    expected = "projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}".format(
-        project=project,
-        instance=instance,
-        table=table,
-        schema_bundle=schema_bundle,
-    )
-    actual = BaseBigtableTableAdminClient.schema_bundle_path(
-        project, instance, table, schema_bundle
-    )
-    assert expected == actual
-
-
-def test_parse_schema_bundle_path():
-    expected = {
-        "project": "oyster",
-        "instance": "nudibranch",
-        "table": "cuttlefish",
-        "schema_bundle": "mussel",
-    }
-    path = BaseBigtableTableAdminClient.schema_bundle_path(**expected)
-
-    # Check that the path construction is reversible.
-    actual = BaseBigtableTableAdminClient.parse_schema_bundle_path(path)
-    assert expected == actual
-
-
-def test_snapshot_path():
-    project = "winkle"
-    instance = "nautilus"
-    cluster = "scallop"
-    snapshot = "abalone"
-    expected = "projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}".format(
-        project=project,
-        instance=instance,
-        cluster=cluster,
-        snapshot=snapshot,
-    )
-    actual = BaseBigtableTableAdminClient.snapshot_path(
-        project, instance, cluster, snapshot
-    )
-    assert expected == actual
-
-
-def test_parse_snapshot_path():
-    expected = {
-        "project": "squid",
-        "instance": "clam",
-        "cluster": "whelk",
-        "snapshot": "octopus",
-    }
-    path = BaseBigtableTableAdminClient.snapshot_path(**expected)
-
-    # Check that the path construction is reversible.
-    actual = BaseBigtableTableAdminClient.parse_snapshot_path(path)
-    assert expected == actual
-
-
-def test_table_path():
-    project = "oyster"
-    instance = "nudibranch"
-    table = "cuttlefish"
-    expected = "projects/{project}/instances/{instance}/tables/{table}".format(
-        project=project,
-        instance=instance,
-        table=table,
-    )
-    actual = BaseBigtableTableAdminClient.table_path(project, instance, table)
-    assert expected == actual
-
-
-def test_parse_table_path():
-    expected = {
-        "project": "mussel",
-        "instance": "winkle",
-        "table": "nautilus",
-    }
-    path = BaseBigtableTableAdminClient.table_path(**expected)
-
-    # Check that the path construction is reversible.
-    actual = BaseBigtableTableAdminClient.parse_table_path(path)
-    assert expected == actual
-
-
-def test_common_billing_account_path():
-    billing_account = "scallop"
-    expected = "billingAccounts/{billing_account}".format(
-        billing_account=billing_account,
-    )
-    actual = BaseBigtableTableAdminClient.common_billing_account_path(billing_account)
-    assert expected == actual
-
-
-def test_parse_common_billing_account_path():
-    expected = {
-        "billing_account": "abalone",
-    }
-    path = BaseBigtableTableAdminClient.common_billing_account_path(**expected)
-
-    # Check that the path construction is reversible.
-    actual = BaseBigtableTableAdminClient.parse_common_billing_account_path(path)
-    assert expected == actual
-
-
-def test_common_folder_path():
-    folder = "squid"
-    expected = "folders/{folder}".format(
-        folder=folder,
-    )
-    actual = BaseBigtableTableAdminClient.common_folder_path(folder)
-    assert expected == actual
-
-
-def test_parse_common_folder_path():
-    expected = {
-        "folder": "clam",
-    }
-    path = BaseBigtableTableAdminClient.common_folder_path(**expected)
-
-    # Check that the path construction is reversible.
-    actual = BaseBigtableTableAdminClient.parse_common_folder_path(path)
-    assert expected == actual
-
-
-def test_common_organization_path():
-    organization = "whelk"
-    expected = "organizations/{organization}".format(
-        organization=organization,
-    )
-    actual = BaseBigtableTableAdminClient.common_organization_path(organization)
-    assert expected == actual
-
-
-def test_parse_common_organization_path():
-    expected = {
-        "organization": "octopus",
-    }
-    path = BaseBigtableTableAdminClient.common_organization_path(**expected)
-
-    # Check that the path construction is reversible.
-    actual = BaseBigtableTableAdminClient.parse_common_organization_path(path)
-    assert expected == actual
-
-
-def test_common_project_path():
-    project = "oyster"
-    expected = "projects/{project}".format(
-        project=project,
-    )
-    actual = BaseBigtableTableAdminClient.common_project_path(project)
-    assert expected == actual
-
-
-def test_parse_common_project_path():
-    expected = {
-        "project": "nudibranch",
-    }
-    path = BaseBigtableTableAdminClient.common_project_path(**expected)
-
-    # Check that the path construction is reversible.
-    actual = BaseBigtableTableAdminClient.parse_common_project_path(path)
-    assert expected == actual
-
-
-def test_common_location_path():
-    project = "cuttlefish"
-    location = "mussel"
-    expected = "projects/{project}/locations/{location}".format(
-        project=project,
-        location=location,
-    )
-    actual = BaseBigtableTableAdminClient.common_location_path(project, location)
-    assert expected == actual
-
-
-def test_parse_common_location_path():
-    expected = {
-        "project": "winkle",
-        "location": "nautilus",
-    }
-    path = BaseBigtableTableAdminClient.common_location_path(**expected)
-
-    # Check that the path construction is reversible.
-    actual = BaseBigtableTableAdminClient.parse_common_location_path(path)
-    assert expected == actual
-
-
-def test_client_with_default_client_info():
-    client_info = gapic_v1.client_info.ClientInfo()
-
-    with mock.patch.object(
-        transports.BigtableTableAdminTransport, "_prep_wrapped_messages"
-    ) as prep:
-        client = BaseBigtableTableAdminClient(
-            credentials=ga_credentials.AnonymousCredentials(),
-            client_info=client_info,
-        )
-        prep.assert_called_once_with(client_info)
-
-    with mock.patch.object(
-        transports.BigtableTableAdminTransport, "_prep_wrapped_messages"
-    ) as prep:
-        transport_class = BaseBigtableTableAdminClient.get_transport_class()
-        transport = transport_class(
-            credentials=ga_credentials.AnonymousCredentials(),
-            client_info=client_info,
-        )
-        prep.assert_called_once_with(client_info)
-
-
-def test_transport_close_grpc():
-    client = BaseBigtableTableAdminClient(
-        credentials=ga_credentials.AnonymousCredentials(), transport="grpc"
-    )
-    with mock.patch.object(
-        type(getattr(client.transport, "_grpc_channel")), "close"
-    ) as close:
-        with client:
-            close.assert_not_called()
-        close.assert_called_once()
-
-
-@pytest.mark.asyncio
-async def test_transport_close_grpc_asyncio():
-    client = BaseBigtableTableAdminAsyncClient(
-        credentials=async_anonymous_credentials(), transport="grpc_asyncio"
-    )
-    with mock.patch.object(
-        type(getattr(client.transport, "_grpc_channel")), "close"
-    ) as close:
-        async with client:
-            close.assert_not_called()
-        close.assert_called_once()
-
-
-def test_transport_close_rest():
-    client = BaseBigtableTableAdminClient(
-        credentials=ga_credentials.AnonymousCredentials(), transport="rest"
-    )
-    with mock.patch.object(
-        type(getattr(client.transport, "_session")), "close"
-    ) as close:
-        with client:
-            close.assert_not_called()
-        close.assert_called_once()
-
-
-def test_client_ctx():
-    transports = [
-        "rest",
-        "grpc",
-    ]
-    for transport in transports:
-        client = BaseBigtableTableAdminClient(
-            credentials=ga_credentials.AnonymousCredentials(), transport=transport
-        )
-        # Test client calls underlying transport.
-        with mock.patch.object(type(client.transport), "close") as close:
-            close.assert_not_called()
-            with client:
-                pass
-            close.assert_called()
-
-
-@pytest.mark.parametrize(
-    "client_class,transport_class",
-    [
-        (BaseBigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport),
-        (
-            BaseBigtableTableAdminAsyncClient,
-            transports.BigtableTableAdminGrpcAsyncIOTransport,
-        ),
-    ],
-)
-def test_api_key_credentials(client_class, transport_class):
-    with mock.patch.object(
-        google.auth._default, "get_api_key_credentials", create=True
-    ) as get_api_key_credentials:
-        mock_cred = mock.Mock()
-        get_api_key_credentials.return_value = mock_cred
-        options = client_options.ClientOptions()
-        options.api_key = "api_key"
-        with mock.patch.object(transport_class, "__init__") as patched:
-            patched.return_value = None
-            client = client_class(client_options=options)
-            patched.assert_called_once_with(
-                credentials=mock_cred,
-                credentials_file=None,
-                host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
-                    UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
-                ),
-                scopes=None,
-                client_cert_source_for_mtls=None,
-                quota_project_id=None,
-                client_info=transports.base.DEFAULT_CLIENT_INFO,
-                always_use_jwt_access=True,
-                api_audience=None,
-            )