From fd577f916f7bb77e694c29d05a8258c9fff744f6 Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Sat, 13 Feb 2021 05:55:40 -0800 Subject: [PATCH 01/11] changes without context autosynth cannot find the source of changes triggered by earlier changes in this repository, or by version upgrades to tools such as linters. --- google/cloud/bigtable_admin_v2/__init__.py | 167 +- .../cloud/bigtable_admin_v2/gapic/__init__.py | 0 .../gapic/bigtable_instance_admin_client.py | 1891 +++++++++ .../bigtable_instance_admin_client_config.py | 136 + .../gapic/bigtable_table_admin_client.py | 2292 +++++++++++ .../bigtable_table_admin_client_config.py | 160 + google/cloud/bigtable_admin_v2/gapic/enums.py | 213 + .../gapic/transports/__init__.py | 0 .../bigtable_instance_admin_grpc_transport.py | 380 ++ .../bigtable_table_admin_grpc_transport.py | 471 +++ .../cloud/bigtable_admin_v2/proto/__init__.py | 0 .../proto/bigtable_instance_admin.proto | 11 +- .../proto/bigtable_instance_admin_pb2.py | 2432 +++++++++++ .../proto/bigtable_instance_admin_pb2_grpc.py | 895 +++++ .../proto/bigtable_table_admin.proto | 327 +- .../proto/bigtable_table_admin_pb2.py | 3574 +++++++++++++++++ .../proto/bigtable_table_admin_pb2_grpc.py | 1090 +++++ .../bigtable_admin_v2/proto/common_pb2.py | 188 + .../proto/common_pb2_grpc.py | 3 + .../bigtable_admin_v2/proto/instance_pb2.py | 886 ++++ .../proto/instance_pb2_grpc.py | 3 + .../bigtable_admin_v2/proto/table_pb2.py | 1682 ++++++++ .../bigtable_admin_v2/proto/table_pb2_grpc.py | 3 + google/cloud/bigtable_admin_v2/types.py | 76 + google/cloud/bigtable_v2/__init__.py | 75 +- google/cloud/bigtable_v2/gapic/__init__.py | 0 .../bigtable_v2/gapic/bigtable_client.py | 771 ++++ .../gapic/bigtable_client_config.py | 80 + .../bigtable_v2/gapic/transports/__init__.py | 0 .../transports/bigtable_grpc_transport.py | 207 + google/cloud/bigtable_v2/proto/__init__.py | 0 .../cloud/bigtable_v2/proto/bigtable_pb2.py | 1798 +++++++++ 
.../bigtable_v2/proto/bigtable_pb2_grpc.py | 317 ++ google/cloud/bigtable_v2/proto/data_pb2.py | 2668 ++++++++++++ .../cloud/bigtable_v2/proto/data_pb2_grpc.py | 3 + google/cloud/bigtable_v2/types.py | 54 + noxfile.py | 2 +- synth.metadata | 4 +- .../unit/gapic/v2/test_bigtable_client_v2.py | 316 ++ .../test_bigtable_instance_admin_client_v2.py | 924 +++++ .../v2/test_bigtable_table_admin_client_v2.py | 1039 +++++ 41 files changed, 24776 insertions(+), 362 deletions(-) create mode 100644 google/cloud/bigtable_admin_v2/gapic/__init__.py create mode 100644 google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py create mode 100644 google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py create mode 100644 google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py create mode 100644 google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py create mode 100644 google/cloud/bigtable_admin_v2/gapic/enums.py create mode 100644 google/cloud/bigtable_admin_v2/gapic/transports/__init__.py create mode 100644 google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py create mode 100644 google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py create mode 100644 google/cloud/bigtable_admin_v2/proto/__init__.py create mode 100644 google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py create mode 100644 google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py create mode 100644 google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py create mode 100644 google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py create mode 100644 google/cloud/bigtable_admin_v2/proto/common_pb2.py create mode 100644 google/cloud/bigtable_admin_v2/proto/common_pb2_grpc.py create mode 100644 google/cloud/bigtable_admin_v2/proto/instance_pb2.py create mode 100644 google/cloud/bigtable_admin_v2/proto/instance_pb2_grpc.py create mode 
100644 google/cloud/bigtable_admin_v2/proto/table_pb2.py create mode 100644 google/cloud/bigtable_admin_v2/proto/table_pb2_grpc.py create mode 100644 google/cloud/bigtable_admin_v2/types.py create mode 100644 google/cloud/bigtable_v2/gapic/__init__.py create mode 100644 google/cloud/bigtable_v2/gapic/bigtable_client.py create mode 100644 google/cloud/bigtable_v2/gapic/bigtable_client_config.py create mode 100644 google/cloud/bigtable_v2/gapic/transports/__init__.py create mode 100644 google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py create mode 100644 google/cloud/bigtable_v2/proto/__init__.py create mode 100644 google/cloud/bigtable_v2/proto/bigtable_pb2.py create mode 100644 google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py create mode 100644 google/cloud/bigtable_v2/proto/data_pb2.py create mode 100644 google/cloud/bigtable_v2/proto/data_pb2_grpc.py create mode 100644 google/cloud/bigtable_v2/types.py create mode 100644 tests/unit/gapic/v2/test_bigtable_client_v2.py create mode 100644 tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py create mode 100644 tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py diff --git a/google/cloud/bigtable_admin_v2/__init__.py b/google/cloud/bigtable_admin_v2/__init__.py index 423742502..9f72d4f53 100644 --- a/google/cloud/bigtable_admin_v2/__init__.py +++ b/google/cloud/bigtable_admin_v2/__init__.py @@ -1,153 +1,54 @@ # -*- coding: utf-8 -*- - +# # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. -# -from .services.bigtable_instance_admin import BigtableInstanceAdminClient -from .services.bigtable_table_admin import BigtableTableAdminClient -from .types.bigtable_instance_admin import CreateAppProfileRequest -from .types.bigtable_instance_admin import CreateClusterMetadata -from .types.bigtable_instance_admin import CreateClusterRequest -from .types.bigtable_instance_admin import CreateInstanceMetadata -from .types.bigtable_instance_admin import CreateInstanceRequest -from .types.bigtable_instance_admin import DeleteAppProfileRequest -from .types.bigtable_instance_admin import DeleteClusterRequest -from .types.bigtable_instance_admin import DeleteInstanceRequest -from .types.bigtable_instance_admin import GetAppProfileRequest -from .types.bigtable_instance_admin import GetClusterRequest -from .types.bigtable_instance_admin import GetInstanceRequest -from .types.bigtable_instance_admin import ListAppProfilesRequest -from .types.bigtable_instance_admin import ListAppProfilesResponse -from .types.bigtable_instance_admin import ListClustersRequest -from .types.bigtable_instance_admin import ListClustersResponse -from .types.bigtable_instance_admin import ListInstancesRequest -from .types.bigtable_instance_admin import ListInstancesResponse -from .types.bigtable_instance_admin import PartialUpdateInstanceRequest -from .types.bigtable_instance_admin import UpdateAppProfileMetadata -from .types.bigtable_instance_admin import UpdateAppProfileRequest -from .types.bigtable_instance_admin import UpdateClusterMetadata -from .types.bigtable_instance_admin import UpdateInstanceMetadata -from .types.bigtable_table_admin import CheckConsistencyRequest -from .types.bigtable_table_admin import CheckConsistencyResponse -from .types.bigtable_table_admin import CreateBackupMetadata -from .types.bigtable_table_admin import CreateBackupRequest -from .types.bigtable_table_admin 
import CreateTableFromSnapshotMetadata -from .types.bigtable_table_admin import CreateTableFromSnapshotRequest -from .types.bigtable_table_admin import CreateTableRequest -from .types.bigtable_table_admin import DeleteBackupRequest -from .types.bigtable_table_admin import DeleteSnapshotRequest -from .types.bigtable_table_admin import DeleteTableRequest -from .types.bigtable_table_admin import DropRowRangeRequest -from .types.bigtable_table_admin import GenerateConsistencyTokenRequest -from .types.bigtable_table_admin import GenerateConsistencyTokenResponse -from .types.bigtable_table_admin import GetBackupRequest -from .types.bigtable_table_admin import GetSnapshotRequest -from .types.bigtable_table_admin import GetTableRequest -from .types.bigtable_table_admin import ListBackupsRequest -from .types.bigtable_table_admin import ListBackupsResponse -from .types.bigtable_table_admin import ListSnapshotsRequest -from .types.bigtable_table_admin import ListSnapshotsResponse -from .types.bigtable_table_admin import ListTablesRequest -from .types.bigtable_table_admin import ListTablesResponse -from .types.bigtable_table_admin import ModifyColumnFamiliesRequest -from .types.bigtable_table_admin import OptimizeRestoredTableMetadata -from .types.bigtable_table_admin import RestoreTableMetadata -from .types.bigtable_table_admin import RestoreTableRequest -from .types.bigtable_table_admin import SnapshotTableMetadata -from .types.bigtable_table_admin import SnapshotTableRequest -from .types.bigtable_table_admin import UpdateBackupRequest -from .types.common import OperationProgress -from .types.common import StorageType -from .types.instance import AppProfile -from .types.instance import Cluster -from .types.instance import Instance -from .types.table import Backup -from .types.table import BackupInfo -from .types.table import ColumnFamily -from .types.table import GcRule -from .types.table import RestoreInfo -from .types.table import RestoreSourceType -from .types.table 
import Snapshot -from .types.table import Table + +from __future__ import absolute_import +import sys +import warnings + +from google.cloud.bigtable_admin_v2 import types +from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client +from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client +from google.cloud.bigtable_admin_v2.gapic import enums + + +if sys.version_info[:2] == (2, 7): + message = ( + "A future version of this library will drop support for Python 2.7. " + "More details about Python 2 support for Google Cloud Client Libraries " + "can be found at https://cloud.google.com/python/docs/python2-sunset/" + ) + warnings.warn(message, DeprecationWarning) + + +class BigtableInstanceAdminClient( + bigtable_instance_admin_client.BigtableInstanceAdminClient +): + __doc__ = bigtable_instance_admin_client.BigtableInstanceAdminClient.__doc__ + enums = enums + + +class BigtableTableAdminClient(bigtable_table_admin_client.BigtableTableAdminClient): + __doc__ = bigtable_table_admin_client.BigtableTableAdminClient.__doc__ + enums = enums __all__ = ( - "AppProfile", - "Backup", - "BackupInfo", + "enums", + "types", "BigtableInstanceAdminClient", - "CheckConsistencyRequest", - "CheckConsistencyResponse", - "Cluster", - "ColumnFamily", - "CreateAppProfileRequest", - "CreateBackupMetadata", - "CreateBackupRequest", - "CreateClusterMetadata", - "CreateClusterRequest", - "CreateInstanceMetadata", - "CreateInstanceRequest", - "CreateTableFromSnapshotMetadata", - "CreateTableFromSnapshotRequest", - "CreateTableRequest", - "DeleteAppProfileRequest", - "DeleteBackupRequest", - "DeleteClusterRequest", - "DeleteInstanceRequest", - "DeleteSnapshotRequest", - "DeleteTableRequest", - "DropRowRangeRequest", - "GcRule", - "GenerateConsistencyTokenRequest", - "GenerateConsistencyTokenResponse", - "GetAppProfileRequest", - "GetBackupRequest", - "GetClusterRequest", - "GetInstanceRequest", - "GetSnapshotRequest", - "GetTableRequest", - "Instance", - 
"ListAppProfilesRequest", - "ListAppProfilesResponse", - "ListBackupsRequest", - "ListBackupsResponse", - "ListClustersRequest", - "ListClustersResponse", - "ListInstancesRequest", - "ListInstancesResponse", - "ListSnapshotsRequest", - "ListSnapshotsResponse", - "ListTablesRequest", - "ListTablesResponse", - "ModifyColumnFamiliesRequest", - "OperationProgress", - "OptimizeRestoredTableMetadata", - "PartialUpdateInstanceRequest", - "RestoreInfo", - "RestoreSourceType", - "RestoreTableMetadata", - "RestoreTableRequest", - "Snapshot", - "SnapshotTableMetadata", - "SnapshotTableRequest", - "StorageType", - "Table", - "UpdateAppProfileMetadata", - "UpdateAppProfileRequest", - "UpdateBackupRequest", - "UpdateClusterMetadata", - "UpdateInstanceMetadata", "BigtableTableAdminClient", ) diff --git a/google/cloud/bigtable_admin_v2/gapic/__init__.py b/google/cloud/bigtable_admin_v2/gapic/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py b/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py new file mode 100644 index 000000000..27586d140 --- /dev/null +++ b/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py @@ -0,0 +1,1891 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Accesses the google.bigtable.admin.v2 BigtableInstanceAdmin API.""" + +import functools +import pkg_resources +import warnings + +from google.oauth2 import service_account +import google.api_core.client_options +import google.api_core.gapic_v1.client_info +import google.api_core.gapic_v1.config +import google.api_core.gapic_v1.method +import google.api_core.gapic_v1.routing_header +import google.api_core.grpc_helpers +import google.api_core.operation +import google.api_core.operations_v1 +import google.api_core.page_iterator +import google.api_core.path_template +import grpc + +from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client_config +from google.cloud.bigtable_admin_v2.gapic import enums +from google.cloud.bigtable_admin_v2.gapic.transports import ( + bigtable_instance_admin_grpc_transport, +) +from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2 +from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2_grpc +from google.cloud.bigtable_admin_v2.proto import instance_pb2 +from google.iam.v1 import iam_policy_pb2 +from google.iam.v1 import options_pb2 +from google.iam.v1 import policy_pb2 +from google.longrunning import operations_pb2 +from google.protobuf import empty_pb2 +from google.protobuf import field_mask_pb2 + + +_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( + "google-cloud-bigtable-admin", +).version + + +class BigtableInstanceAdminClient(object): + """ + Service for creating, configuring, and deleting Cloud Bigtable Instances and + Clusters. Provides access to the Instance and Cluster schemas only, not the + tables' metadata or data stored in those tables. + """ + + SERVICE_ADDRESS = "bigtableadmin.googleapis.com:443" + """The default address of the service.""" + + # The name of the interface for this client. This is the key used to + # find the method configuration in the client_config dictionary. 
+ _INTERFACE_NAME = "google.bigtable.admin.v2.BigtableInstanceAdmin" + + @classmethod + def from_service_account_file(cls, filename, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + BigtableInstanceAdminClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @classmethod + def app_profile_path(cls, project, instance, app_profile): + """Return a fully-qualified app_profile string.""" + return google.api_core.path_template.expand( + "projects/{project}/instances/{instance}/appProfiles/{app_profile}", + project=project, + instance=instance, + app_profile=app_profile, + ) + + @classmethod + def cluster_path(cls, project, instance, cluster): + """Return a fully-qualified cluster string.""" + return google.api_core.path_template.expand( + "projects/{project}/instances/{instance}/clusters/{cluster}", + project=project, + instance=instance, + cluster=cluster, + ) + + @classmethod + def instance_path(cls, project, instance): + """Return a fully-qualified instance string.""" + return google.api_core.path_template.expand( + "projects/{project}/instances/{instance}", + project=project, + instance=instance, + ) + + @classmethod + def location_path(cls, project, location): + """Return a fully-qualified location string.""" + return google.api_core.path_template.expand( + "projects/{project}/locations/{location}", + project=project, + location=location, + ) + + @classmethod + def project_path(cls, project): + """Return a fully-qualified project string.""" + return google.api_core.path_template.expand( + "projects/{project}", 
project=project, + ) + + def __init__( + self, + transport=None, + channel=None, + credentials=None, + client_config=None, + client_info=None, + client_options=None, + ): + """Constructor. + + Args: + transport (Union[~.BigtableInstanceAdminGrpcTransport, + Callable[[~.Credentials, type], ~.BigtableInstanceAdminGrpcTransport]): A transport + instance, responsible for actually making the API calls. + The default transport uses the gRPC protocol. + This argument may also be a callable which returns a + transport instance. Callables will be sent the credentials + as the first argument and the default transport class as + the second argument. + channel (grpc.Channel): DEPRECATED. A ``Channel`` instance + through which to make calls. This argument is mutually exclusive + with ``credentials``; providing both will raise an exception. + credentials (google.auth.credentials.Credentials): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is mutually exclusive with providing a + transport instance to ``transport``; doing so will raise + an exception. + client_config (dict): DEPRECATED. A dictionary of call options for + each method. If not specified, the default configuration is used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + client_options (Union[dict, google.api_core.client_options.ClientOptions]): + Client options used to set user options on the client. API Endpoint + should be set through client_options. + """ + # Raise deprecation warnings for things we want to go away. 
+ if client_config is not None: + warnings.warn( + "The `client_config` argument is deprecated.", + PendingDeprecationWarning, + stacklevel=2, + ) + else: + client_config = bigtable_instance_admin_client_config.config + + if channel: + warnings.warn( + "The `channel` argument is deprecated; use " "`transport` instead.", + PendingDeprecationWarning, + stacklevel=2, + ) + + api_endpoint = self.SERVICE_ADDRESS + if client_options: + if type(client_options) == dict: + client_options = google.api_core.client_options.from_dict( + client_options + ) + if client_options.api_endpoint: + api_endpoint = client_options.api_endpoint + + # Instantiate the transport. + # The transport is responsible for handling serialization and + # deserialization and actually sending data to the service. + if transport: + if callable(transport): + self.transport = transport( + credentials=credentials, + default_class=bigtable_instance_admin_grpc_transport.BigtableInstanceAdminGrpcTransport, + address=api_endpoint, + ) + else: + if credentials: + raise ValueError( + "Received both a transport instance and " + "credentials; these are mutually exclusive." + ) + self.transport = transport + else: + self.transport = bigtable_instance_admin_grpc_transport.BigtableInstanceAdminGrpcTransport( + address=api_endpoint, channel=channel, credentials=credentials, + ) + + if client_info is None: + client_info = google.api_core.gapic_v1.client_info.ClientInfo( + gapic_version=_GAPIC_LIBRARY_VERSION, + ) + else: + client_info.gapic_version = _GAPIC_LIBRARY_VERSION + self._client_info = client_info + + # Parse out the default settings for retry and timeout for each RPC + # from the client configuration. + # (Ordinarily, these are the defaults specified in the `*_config.py` + # file next to this one.) + self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( + client_config["interfaces"][self._INTERFACE_NAME], + ) + + # Save a dictionary of cached API call functions. 
+ # These are the actual callables which invoke the proper + # transport methods, wrapped with `wrap_method` to add retry, + # timeout, and the like. + self._inner_api_calls = {} + + # Service calls + def create_instance( + self, + parent, + instance_id, + instance, + clusters, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Create an instance within a project. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() + >>> + >>> parent = client.project_path('[PROJECT]') + >>> + >>> # TODO: Initialize `instance_id`: + >>> instance_id = '' + >>> + >>> # TODO: Initialize `instance`: + >>> instance = {} + >>> + >>> # TODO: Initialize `clusters`: + >>> clusters = {} + >>> + >>> response = client.create_instance(parent, instance_id, instance, clusters) + >>> + >>> def callback(operation_future): + ... # Handle result. + ... result = operation_future.result() + >>> + >>> response.add_done_callback(callback) + >>> + >>> # Handle metadata. + >>> metadata = response.metadata() + + Args: + parent (str): Required. The unique name of the project in which to create the new + instance. Values are of the form ``projects/{project}``. + instance_id (str): Required. The ID to be used when referring to the new instance + within its project, e.g., just ``myinstance`` rather than + ``projects/myproject/instances/myinstance``. + instance (Union[dict, ~google.cloud.bigtable_admin_v2.types.Instance]): Required. The instance to create. Fields marked ``OutputOnly`` must + be left blank. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_admin_v2.types.Instance` + clusters (dict[str -> Union[dict, ~google.cloud.bigtable_admin_v2.types.Cluster]]): Required. 
The clusters to be created within the instance, mapped by + desired cluster ID, e.g., just ``mycluster`` rather than + ``projects/myproject/instances/myinstance/clusters/mycluster``. Fields + marked ``OutputOnly`` must be left blank. Currently, at most four + clusters can be specified. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_admin_v2.types.Cluster` + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. 
+ if "create_instance" not in self._inner_api_calls: + self._inner_api_calls[ + "create_instance" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.create_instance, + default_retry=self._method_configs["CreateInstance"].retry, + default_timeout=self._method_configs["CreateInstance"].timeout, + client_info=self._client_info, + ) + + request = bigtable_instance_admin_pb2.CreateInstanceRequest( + parent=parent, + instance_id=instance_id, + instance=instance, + clusters=clusters, + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("parent", parent)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + operation = self._inner_api_calls["create_instance"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + return google.api_core.operation.from_gapic( + operation, + self.transport._operations_client, + instance_pb2.Instance, + metadata_type=bigtable_instance_admin_pb2.CreateInstanceMetadata, + ) + + def get_instance( + self, + name, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Gets information about an instance. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() + >>> + >>> name = client.instance_path('[PROJECT]', '[INSTANCE]') + >>> + >>> response = client.get_instance(name) + + Args: + name (str): Required. The unique name of the requested instance. Values are of + the form ``projects/{project}/instances/{instance}``. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. 
Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types.Instance` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. + if "get_instance" not in self._inner_api_calls: + self._inner_api_calls[ + "get_instance" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.get_instance, + default_retry=self._method_configs["GetInstance"].retry, + default_timeout=self._method_configs["GetInstance"].timeout, + client_info=self._client_info, + ) + + request = bigtable_instance_admin_pb2.GetInstanceRequest(name=name,) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("name", name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + return self._inner_api_calls["get_instance"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def list_instances( + self, + parent, + page_token=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Lists information about instances in a project. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() + >>> + >>> parent = client.project_path('[PROJECT]') + >>> + >>> response = client.list_instances(parent) + + Args: + parent (str): Required. The unique name of the project for which a list of + instances is requested. 
Values are of the form ``projects/{project}``. + page_token (str): DEPRECATED: This field is unused and ignored. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types.ListInstancesResponse` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. 
+ if "list_instances" not in self._inner_api_calls: + self._inner_api_calls[ + "list_instances" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.list_instances, + default_retry=self._method_configs["ListInstances"].retry, + default_timeout=self._method_configs["ListInstances"].timeout, + client_info=self._client_info, + ) + + request = bigtable_instance_admin_pb2.ListInstancesRequest( + parent=parent, page_token=page_token, + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("parent", parent)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + return self._inner_api_calls["list_instances"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def update_instance( + self, + display_name, + name=None, + state=None, + type_=None, + labels=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Updates an instance within a project. This method updates only the display + name and type for an Instance. To update other Instance properties, such as + labels, use PartialUpdateInstance. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() + >>> + >>> # TODO: Initialize `display_name`: + >>> display_name = '' + >>> + >>> response = client.update_instance(display_name) + + Args: + display_name (str): Required. The descriptive name for this instance as it appears in UIs. + Can be changed at any time, but should be kept globally unique + to avoid confusion. + name (str): The unique name of the instance. Values are of the form + ``projects/{project}/instances/[a-z][a-z0-9\\-]+[a-z0-9]``. + state (~google.cloud.bigtable_admin_v2.types.State): (``OutputOnly``) The current state of the instance. 
+ type_ (~google.cloud.bigtable_admin_v2.types.Type): The type of the instance. Defaults to ``PRODUCTION``. + labels (dict[str -> str]): Labels are a flexible and lightweight mechanism for organizing cloud + resources into groups that reflect a customer's organizational needs and + deployment strategies. They can be used to filter resources and + aggregate metrics. + + - Label keys must be between 1 and 63 characters long and must conform + to the regular expression: + ``[\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}``. + - Label values must be between 0 and 63 characters long and must + conform to the regular expression: ``[\p{Ll}\p{Lo}\p{N}_-]{0,63}``. + - No more than 64 labels can be associated with a given resource. + - Keys and values must both be under 128 bytes. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types.Instance` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. 
+ if "update_instance" not in self._inner_api_calls: + self._inner_api_calls[ + "update_instance" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.update_instance, + default_retry=self._method_configs["UpdateInstance"].retry, + default_timeout=self._method_configs["UpdateInstance"].timeout, + client_info=self._client_info, + ) + + request = instance_pb2.Instance( + display_name=display_name, + name=name, + state=state, + type=type_, + labels=labels, + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("name", name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + return self._inner_api_calls["update_instance"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def partial_update_instance( + self, + instance, + update_mask, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Partially updates an instance within a project. This method can modify all + fields of an Instance and is the preferred way to update an Instance. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() + >>> + >>> # TODO: Initialize `instance`: + >>> instance = {} + >>> + >>> # TODO: Initialize `update_mask`: + >>> update_mask = {} + >>> + >>> response = client.partial_update_instance(instance, update_mask) + >>> + >>> def callback(operation_future): + ... # Handle result. + ... result = operation_future.result() + >>> + >>> response.add_done_callback(callback) + >>> + >>> # Handle metadata. + >>> metadata = response.metadata() + + Args: + instance (Union[dict, ~google.cloud.bigtable_admin_v2.types.Instance]): Required. The Instance which will (partially) replace the current value. 
+ + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_admin_v2.types.Instance` + update_mask (Union[dict, ~google.cloud.bigtable_admin_v2.types.FieldMask]): Required. The subset of Instance fields which should be replaced. + Must be explicitly set. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_admin_v2.types.FieldMask` + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. 
+ if "partial_update_instance" not in self._inner_api_calls: + self._inner_api_calls[ + "partial_update_instance" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.partial_update_instance, + default_retry=self._method_configs["PartialUpdateInstance"].retry, + default_timeout=self._method_configs["PartialUpdateInstance"].timeout, + client_info=self._client_info, + ) + + request = bigtable_instance_admin_pb2.PartialUpdateInstanceRequest( + instance=instance, update_mask=update_mask, + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("instance.name", instance.name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + operation = self._inner_api_calls["partial_update_instance"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + return google.api_core.operation.from_gapic( + operation, + self.transport._operations_client, + instance_pb2.Instance, + metadata_type=bigtable_instance_admin_pb2.UpdateInstanceMetadata, + ) + + def delete_instance( + self, + name, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Delete an instance from a project. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() + >>> + >>> name = client.instance_path('[PROJECT]', '[INSTANCE]') + >>> + >>> client.delete_instance(name) + + Args: + name (str): Required. The unique name of the instance to be deleted. Values are + of the form ``projects/{project}/instances/{instance}``. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. 
+ timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. + if "delete_instance" not in self._inner_api_calls: + self._inner_api_calls[ + "delete_instance" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.delete_instance, + default_retry=self._method_configs["DeleteInstance"].retry, + default_timeout=self._method_configs["DeleteInstance"].timeout, + client_info=self._client_info, + ) + + request = bigtable_instance_admin_pb2.DeleteInstanceRequest(name=name,) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("name", name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + self._inner_api_calls["delete_instance"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def create_cluster( + self, + parent, + cluster_id, + cluster, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Creates a cluster within an instance. 
+ + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() + >>> + >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') + >>> + >>> # TODO: Initialize `cluster_id`: + >>> cluster_id = '' + >>> + >>> # TODO: Initialize `cluster`: + >>> cluster = {} + >>> + >>> response = client.create_cluster(parent, cluster_id, cluster) + >>> + >>> def callback(operation_future): + ... # Handle result. + ... result = operation_future.result() + >>> + >>> response.add_done_callback(callback) + >>> + >>> # Handle metadata. + >>> metadata = response.metadata() + + Args: + parent (str): Required. The unique name of the instance in which to create the new + cluster. Values are of the form + ``projects/{project}/instances/{instance}``. + cluster_id (str): Required. The ID to be used when referring to the new cluster within + its instance, e.g., just ``mycluster`` rather than + ``projects/myproject/instances/myinstance/clusters/mycluster``. + cluster (Union[dict, ~google.cloud.bigtable_admin_v2.types.Cluster]): Required. The cluster to be created. Fields marked ``OutputOnly`` + must be left blank. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_admin_v2.types.Cluster` + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. 
+ google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. + if "create_cluster" not in self._inner_api_calls: + self._inner_api_calls[ + "create_cluster" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.create_cluster, + default_retry=self._method_configs["CreateCluster"].retry, + default_timeout=self._method_configs["CreateCluster"].timeout, + client_info=self._client_info, + ) + + request = bigtable_instance_admin_pb2.CreateClusterRequest( + parent=parent, cluster_id=cluster_id, cluster=cluster, + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("parent", parent)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + operation = self._inner_api_calls["create_cluster"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + return google.api_core.operation.from_gapic( + operation, + self.transport._operations_client, + instance_pb2.Cluster, + metadata_type=bigtable_instance_admin_pb2.CreateClusterMetadata, + ) + + def get_cluster( + self, + name, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Gets information about a cluster. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() + >>> + >>> name = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') + >>> + >>> response = client.get_cluster(name) + + Args: + name (str): Required. The unique name of the requested cluster. Values are of + the form ``projects/{project}/instances/{instance}/clusters/{cluster}``. 
+ retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types.Cluster` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. + if "get_cluster" not in self._inner_api_calls: + self._inner_api_calls[ + "get_cluster" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.get_cluster, + default_retry=self._method_configs["GetCluster"].retry, + default_timeout=self._method_configs["GetCluster"].timeout, + client_info=self._client_info, + ) + + request = bigtable_instance_admin_pb2.GetClusterRequest(name=name,) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("name", name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + return self._inner_api_calls["get_cluster"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def list_clusters( + self, + parent, + page_token=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Lists information about clusters in an instance. 
+ + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() + >>> + >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') + >>> + >>> response = client.list_clusters(parent) + + Args: + parent (str): Required. The unique name of the instance for which a list of + clusters is requested. Values are of the form + ``projects/{project}/instances/{instance}``. Use ``{instance} = '-'`` to + list Clusters for all Instances in a project, e.g., + ``projects/myproject/instances/-``. + page_token (str): DEPRECATED: This field is unused and ignored. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types.ListClustersResponse` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. 
+ if "list_clusters" not in self._inner_api_calls: + self._inner_api_calls[ + "list_clusters" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.list_clusters, + default_retry=self._method_configs["ListClusters"].retry, + default_timeout=self._method_configs["ListClusters"].timeout, + client_info=self._client_info, + ) + + request = bigtable_instance_admin_pb2.ListClustersRequest( + parent=parent, page_token=page_token, + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("parent", parent)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + return self._inner_api_calls["list_clusters"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def update_cluster( + self, + serve_nodes, + name=None, + location=None, + state=None, + default_storage_type=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Updates a cluster within an instance. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() + >>> + >>> # TODO: Initialize `serve_nodes`: + >>> serve_nodes = 0 + >>> + >>> response = client.update_cluster(serve_nodes) + >>> + >>> def callback(operation_future): + ... # Handle result. + ... result = operation_future.result() + >>> + >>> response.add_done_callback(callback) + >>> + >>> # Handle metadata. + >>> metadata = response.metadata() + + Args: + serve_nodes (int): Required. The number of nodes allocated to this cluster. More nodes enable + higher throughput and more consistent performance. + name (str): The unique name of the cluster. Values are of the form + ``projects/{project}/instances/{instance}/clusters/[a-z][-a-z0-9]*``. 
+ location (str): (``CreationOnly``) The location where this cluster's nodes and + storage reside. For best performance, clients should be located as close + as possible to this cluster. Currently only zones are supported, so + values should be of the form ``projects/{project}/locations/{zone}``. + state (~google.cloud.bigtable_admin_v2.types.State): The current state of the cluster. + default_storage_type (~google.cloud.bigtable_admin_v2.types.StorageType): (``CreationOnly``) The type of storage used by this cluster to serve + its parent instance's tables, unless explicitly overridden. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. 
+ if "update_cluster" not in self._inner_api_calls: + self._inner_api_calls[ + "update_cluster" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.update_cluster, + default_retry=self._method_configs["UpdateCluster"].retry, + default_timeout=self._method_configs["UpdateCluster"].timeout, + client_info=self._client_info, + ) + + request = instance_pb2.Cluster( + serve_nodes=serve_nodes, + name=name, + location=location, + state=state, + default_storage_type=default_storage_type, + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("name", name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + operation = self._inner_api_calls["update_cluster"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + return google.api_core.operation.from_gapic( + operation, + self.transport._operations_client, + instance_pb2.Cluster, + metadata_type=bigtable_instance_admin_pb2.UpdateClusterMetadata, + ) + + def delete_cluster( + self, + name, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Deletes a cluster from an instance. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() + >>> + >>> name = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') + >>> + >>> client.delete_cluster(name) + + Args: + name (str): Required. The unique name of the cluster to be deleted. Values are + of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. 
+ timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. + if "delete_cluster" not in self._inner_api_calls: + self._inner_api_calls[ + "delete_cluster" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.delete_cluster, + default_retry=self._method_configs["DeleteCluster"].retry, + default_timeout=self._method_configs["DeleteCluster"].timeout, + client_info=self._client_info, + ) + + request = bigtable_instance_admin_pb2.DeleteClusterRequest(name=name,) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("name", name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + self._inner_api_calls["delete_cluster"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def create_app_profile( + self, + parent, + app_profile_id, + app_profile, + ignore_warnings=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Creates an app profile within an instance. 
+ + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() + >>> + >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') + >>> + >>> # TODO: Initialize `app_profile_id`: + >>> app_profile_id = '' + >>> + >>> # TODO: Initialize `app_profile`: + >>> app_profile = {} + >>> + >>> response = client.create_app_profile(parent, app_profile_id, app_profile) + + Args: + parent (str): Required. The unique name of the instance in which to create the new + app profile. Values are of the form + ``projects/{project}/instances/{instance}``. + app_profile_id (str): Required. The ID to be used when referring to the new app profile + within its instance, e.g., just ``myprofile`` rather than + ``projects/myproject/instances/myinstance/appProfiles/myprofile``. + app_profile (Union[dict, ~google.cloud.bigtable_admin_v2.types.AppProfile]): Required. The app profile to be created. Fields marked + ``OutputOnly`` will be ignored. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_admin_v2.types.AppProfile` + ignore_warnings (bool): If true, ignore safety checks when creating the app profile. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types.AppProfile` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. 
+ ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. + if "create_app_profile" not in self._inner_api_calls: + self._inner_api_calls[ + "create_app_profile" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.create_app_profile, + default_retry=self._method_configs["CreateAppProfile"].retry, + default_timeout=self._method_configs["CreateAppProfile"].timeout, + client_info=self._client_info, + ) + + request = bigtable_instance_admin_pb2.CreateAppProfileRequest( + parent=parent, + app_profile_id=app_profile_id, + app_profile=app_profile, + ignore_warnings=ignore_warnings, + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("parent", parent)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + return self._inner_api_calls["create_app_profile"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def get_app_profile( + self, + name, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Gets information about an app profile. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() + >>> + >>> name = client.app_profile_path('[PROJECT]', '[INSTANCE]', '[APP_PROFILE]') + >>> + >>> response = client.get_app_profile(name) + + Args: + name (str): Required. The unique name of the requested app profile. Values are + of the form + ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. 
Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types.AppProfile` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. + if "get_app_profile" not in self._inner_api_calls: + self._inner_api_calls[ + "get_app_profile" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.get_app_profile, + default_retry=self._method_configs["GetAppProfile"].retry, + default_timeout=self._method_configs["GetAppProfile"].timeout, + client_info=self._client_info, + ) + + request = bigtable_instance_admin_pb2.GetAppProfileRequest(name=name,) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("name", name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + return self._inner_api_calls["get_app_profile"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def list_app_profiles( + self, + parent, + page_size=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Lists information about app profiles in an instance. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() + >>> + >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') + >>> + >>> # Iterate over all results + >>> for element in client.list_app_profiles(parent): + ... # process element + ... 
pass + >>> + >>> + >>> # Alternatively: + >>> + >>> # Iterate over results one page at a time + >>> for page in client.list_app_profiles(parent).pages: + ... for element in page: + ... # process element + ... pass + + Args: + parent (str): Required. The unique name of the instance for which a list of app + profiles is requested. Values are of the form + ``projects/{project}/instances/{instance}``. Use ``{instance} = '-'`` to + list AppProfiles for all Instances in a project, e.g., + ``projects/myproject/instances/-``. + page_size (int): The maximum number of resources contained in the + underlying API response. If page streaming is performed per- + resource, this parameter does not affect the return value. If page + streaming is performed per-page, this determines the maximum number + of resources in a page. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.api_core.page_iterator.PageIterator` instance. + An iterable of :class:`~google.cloud.bigtable_admin_v2.types.AppProfile` instances. + You can also iterate over the pages of the response + using its `pages` property. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. 
+ if "list_app_profiles" not in self._inner_api_calls: + self._inner_api_calls[ + "list_app_profiles" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.list_app_profiles, + default_retry=self._method_configs["ListAppProfiles"].retry, + default_timeout=self._method_configs["ListAppProfiles"].timeout, + client_info=self._client_info, + ) + + request = bigtable_instance_admin_pb2.ListAppProfilesRequest( + parent=parent, page_size=page_size, + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("parent", parent)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + iterator = google.api_core.page_iterator.GRPCIterator( + client=None, + method=functools.partial( + self._inner_api_calls["list_app_profiles"], + retry=retry, + timeout=timeout, + metadata=metadata, + ), + request=request, + items_field="app_profiles", + request_token_field="page_token", + response_token_field="next_page_token", + ) + return iterator + + def update_app_profile( + self, + app_profile, + update_mask, + ignore_warnings=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Updates an app profile within an instance. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() + >>> + >>> # TODO: Initialize `app_profile`: + >>> app_profile = {} + >>> + >>> # TODO: Initialize `update_mask`: + >>> update_mask = {} + >>> + >>> response = client.update_app_profile(app_profile, update_mask) + >>> + >>> def callback(operation_future): + ... # Handle result. + ... result = operation_future.result() + >>> + >>> response.add_done_callback(callback) + >>> + >>> # Handle metadata. 
+ >>> metadata = response.metadata() + + Args: + app_profile (Union[dict, ~google.cloud.bigtable_admin_v2.types.AppProfile]): Required. The app profile which will (partially) replace the current value. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_admin_v2.types.AppProfile` + update_mask (Union[dict, ~google.cloud.bigtable_admin_v2.types.FieldMask]): Required. The subset of app profile fields which should be replaced. + If unset, all fields will be replaced. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_admin_v2.types.FieldMask` + ignore_warnings (bool): If true, ignore safety checks when updating the app profile. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. 
+ if "update_app_profile" not in self._inner_api_calls: + self._inner_api_calls[ + "update_app_profile" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.update_app_profile, + default_retry=self._method_configs["UpdateAppProfile"].retry, + default_timeout=self._method_configs["UpdateAppProfile"].timeout, + client_info=self._client_info, + ) + + request = bigtable_instance_admin_pb2.UpdateAppProfileRequest( + app_profile=app_profile, + update_mask=update_mask, + ignore_warnings=ignore_warnings, + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("app_profile.name", app_profile.name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + operation = self._inner_api_calls["update_app_profile"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + return google.api_core.operation.from_gapic( + operation, + self.transport._operations_client, + instance_pb2.AppProfile, + metadata_type=bigtable_instance_admin_pb2.UpdateAppProfileMetadata, + ) + + def delete_app_profile( + self, + name, + ignore_warnings=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Deletes an app profile from an instance. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() + >>> + >>> name = client.app_profile_path('[PROJECT]', '[INSTANCE]', '[APP_PROFILE]') + >>> + >>> client.delete_app_profile(name) + + Args: + name (str): Required. The unique name of the app profile to be deleted. Values + are of the form + ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``. + ignore_warnings (bool): If true, ignore safety checks when deleting the app profile. 
+ retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. + if "delete_app_profile" not in self._inner_api_calls: + self._inner_api_calls[ + "delete_app_profile" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.delete_app_profile, + default_retry=self._method_configs["DeleteAppProfile"].retry, + default_timeout=self._method_configs["DeleteAppProfile"].timeout, + client_info=self._client_info, + ) + + request = bigtable_instance_admin_pb2.DeleteAppProfileRequest( + name=name, ignore_warnings=ignore_warnings, + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("name", name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + self._inner_api_calls["delete_app_profile"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def get_iam_policy( + self, + resource, + options_=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Gets the access control policy for an instance resource. Returns an empty + policy if an instance exists but does not have a policy set. 
+ + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() + >>> + >>> # TODO: Initialize `resource`: + >>> resource = '' + >>> + >>> response = client.get_iam_policy(resource) + + Args: + resource (str): REQUIRED: The resource for which the policy is being requested. + See the operation documentation for the appropriate value for this field. + options_ (Union[dict, ~google.cloud.bigtable_admin_v2.types.GetPolicyOptions]): OPTIONAL: A ``GetPolicyOptions`` object for specifying options to + ``GetIamPolicy``. This field is only used by Cloud IAM. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_admin_v2.types.GetPolicyOptions` + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types.Policy` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. 
+ if "get_iam_policy" not in self._inner_api_calls: + self._inner_api_calls[ + "get_iam_policy" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.get_iam_policy, + default_retry=self._method_configs["GetIamPolicy"].retry, + default_timeout=self._method_configs["GetIamPolicy"].timeout, + client_info=self._client_info, + ) + + request = iam_policy_pb2.GetIamPolicyRequest( + resource=resource, options=options_, + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("resource", resource)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + return self._inner_api_calls["get_iam_policy"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def set_iam_policy( + self, + resource, + policy, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Sets the access control policy on an instance resource. Replaces any + existing policy. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() + >>> + >>> # TODO: Initialize `resource`: + >>> resource = '' + >>> + >>> # TODO: Initialize `policy`: + >>> policy = {} + >>> + >>> response = client.set_iam_policy(resource, policy) + + Args: + resource (str): REQUIRED: The resource for which the policy is being specified. + See the operation documentation for the appropriate value for this field. + policy (Union[dict, ~google.cloud.bigtable_admin_v2.types.Policy]): REQUIRED: The complete policy to be applied to the ``resource``. The + size of the policy is limited to a few 10s of KB. An empty policy is a + valid policy but certain Cloud Platform services (such as Projects) + might reject them. 
+ + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_admin_v2.types.Policy` + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types.Policy` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. 
+ if "set_iam_policy" not in self._inner_api_calls: + self._inner_api_calls[ + "set_iam_policy" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.set_iam_policy, + default_retry=self._method_configs["SetIamPolicy"].retry, + default_timeout=self._method_configs["SetIamPolicy"].timeout, + client_info=self._client_info, + ) + + request = iam_policy_pb2.SetIamPolicyRequest(resource=resource, policy=policy,) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("resource", resource)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + return self._inner_api_calls["set_iam_policy"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def test_iam_permissions( + self, + resource, + permissions, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Returns permissions that the caller has on the specified instance resource. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() + >>> + >>> # TODO: Initialize `resource`: + >>> resource = '' + >>> + >>> # TODO: Initialize `permissions`: + >>> permissions = [] + >>> + >>> response = client.test_iam_permissions(resource, permissions) + + Args: + resource (str): REQUIRED: The resource for which the policy detail is being requested. + See the operation documentation for the appropriate value for this field. + permissions (list[str]): The set of permissions to check for the ``resource``. Permissions + with wildcards (such as '*' or 'storage.*') are not allowed. For more + information see `IAM + Overview `__. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. 
+ timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types.TestIamPermissionsResponse` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. + if "test_iam_permissions" not in self._inner_api_calls: + self._inner_api_calls[ + "test_iam_permissions" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.test_iam_permissions, + default_retry=self._method_configs["TestIamPermissions"].retry, + default_timeout=self._method_configs["TestIamPermissions"].timeout, + client_info=self._client_info, + ) + + request = iam_policy_pb2.TestIamPermissionsRequest( + resource=resource, permissions=permissions, + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("resource", resource)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + return self._inner_api_calls["test_iam_permissions"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) diff --git a/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py b/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py new file mode 100644 index 000000000..b2ec35e01 --- /dev/null +++ b/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py @@ -0,0 +1,136 @@ +config = { + "interfaces": { + 
"google.bigtable.admin.v2.BigtableInstanceAdmin": { + "retry_codes": { + "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], + "non_idempotent": [], + }, + "retry_params": { + "idempotent_params": { + "initial_retry_delay_millis": 1000, + "retry_delay_multiplier": 2.0, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 60000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 60000, + "total_timeout_millis": 600000, + }, + "non_idempotent_params": { + "initial_retry_delay_millis": 0, + "retry_delay_multiplier": 1.0, + "max_retry_delay_millis": 0, + "initial_rpc_timeout_millis": 60000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 60000, + "total_timeout_millis": 60000, + }, + "non_idempotent_heavy_params": { + "initial_retry_delay_millis": 0, + "retry_delay_multiplier": 1.0, + "max_retry_delay_millis": 0, + "initial_rpc_timeout_millis": 300000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 300000, + "total_timeout_millis": 300000, + }, + }, + "methods": { + "CreateInstance": { + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "non_idempotent_heavy_params", + }, + "GetInstance": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "idempotent_params", + }, + "ListInstances": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "idempotent_params", + }, + "UpdateInstance": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "idempotent_params", + }, + "PartialUpdateInstance": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "idempotent_params", + }, + "DeleteInstance": { + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "non_idempotent_params", + }, + "CreateCluster": { + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "non_idempotent_params", + }, + 
"GetCluster": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "idempotent_params", + }, + "ListClusters": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "idempotent_params", + }, + "UpdateCluster": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "idempotent_params", + }, + "DeleteCluster": { + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "non_idempotent_params", + }, + "CreateAppProfile": { + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "non_idempotent_params", + }, + "GetAppProfile": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "idempotent_params", + }, + "ListAppProfiles": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "idempotent_params", + }, + "UpdateAppProfile": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "idempotent_params", + }, + "DeleteAppProfile": { + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "non_idempotent_params", + }, + "GetIamPolicy": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "idempotent_params", + }, + "SetIamPolicy": { + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "non_idempotent_params", + }, + "TestIamPermissions": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "idempotent_params", + }, + }, + } + } +} diff --git a/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py b/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py new file mode 100644 index 000000000..db5528e9f --- /dev/null +++ b/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py @@ -0,0 +1,2292 @@ +# -*- coding: utf-8 -*- +# +# Copyright 
2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Accesses the google.bigtable.admin.v2 BigtableTableAdmin API.""" + +import functools +import pkg_resources +import warnings + +from google.oauth2 import service_account +import google.api_core.client_options +import google.api_core.gapic_v1.client_info +import google.api_core.gapic_v1.config +import google.api_core.gapic_v1.method +import google.api_core.gapic_v1.routing_header +import google.api_core.grpc_helpers +import google.api_core.operation +import google.api_core.operations_v1 +import google.api_core.page_iterator +import google.api_core.path_template +import google.api_core.protobuf_helpers +import grpc + +from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client_config +from google.cloud.bigtable_admin_v2.gapic import enums +from google.cloud.bigtable_admin_v2.gapic.transports import ( + bigtable_table_admin_grpc_transport, +) +from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2 +from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2_grpc +from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2 +from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2_grpc +from google.cloud.bigtable_admin_v2.proto import instance_pb2 +from google.cloud.bigtable_admin_v2.proto import table_pb2 +from google.iam.v1 import iam_policy_pb2 +from google.iam.v1 import options_pb2 +from google.iam.v1 import 
policy_pb2 +from google.longrunning import operations_pb2 +from google.protobuf import duration_pb2 +from google.protobuf import empty_pb2 +from google.protobuf import field_mask_pb2 + + +_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( + "google-cloud-bigtable-admin", +).version + + +class BigtableTableAdminClient(object): + """ + Service for creating, configuring, and deleting Cloud Bigtable tables. + + + Provides access to the table schemas only, not the data stored within + the tables. + """ + + SERVICE_ADDRESS = "bigtableadmin.googleapis.com:443" + """The default address of the service.""" + + # The name of the interface for this client. This is the key used to + # find the method configuration in the client_config dictionary. + _INTERFACE_NAME = "google.bigtable.admin.v2.BigtableTableAdmin" + + @classmethod + def from_service_account_file(cls, filename, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + BigtableTableAdminClient: The constructed client. 
+ """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @classmethod + def backup_path(cls, project, instance, cluster, backup): + """Return a fully-qualified backup string.""" + return google.api_core.path_template.expand( + "projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}", + project=project, + instance=instance, + cluster=cluster, + backup=backup, + ) + + @classmethod + def cluster_path(cls, project, instance, cluster): + """Return a fully-qualified cluster string.""" + return google.api_core.path_template.expand( + "projects/{project}/instances/{instance}/clusters/{cluster}", + project=project, + instance=instance, + cluster=cluster, + ) + + @classmethod + def instance_path(cls, project, instance): + """Return a fully-qualified instance string.""" + return google.api_core.path_template.expand( + "projects/{project}/instances/{instance}", + project=project, + instance=instance, + ) + + @classmethod + def snapshot_path(cls, project, instance, cluster, snapshot): + """Return a fully-qualified snapshot string.""" + return google.api_core.path_template.expand( + "projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}", + project=project, + instance=instance, + cluster=cluster, + snapshot=snapshot, + ) + + @classmethod + def table_path(cls, project, instance, table): + """Return a fully-qualified table string.""" + return google.api_core.path_template.expand( + "projects/{project}/instances/{instance}/tables/{table}", + project=project, + instance=instance, + table=table, + ) + + def __init__( + self, + transport=None, + channel=None, + credentials=None, + client_config=None, + client_info=None, + client_options=None, + ): + """Constructor. 
+ + Args: + transport (Union[~.BigtableTableAdminGrpcTransport, + Callable[[~.Credentials, type], ~.BigtableTableAdminGrpcTransport]): A transport + instance, responsible for actually making the API calls. + The default transport uses the gRPC protocol. + This argument may also be a callable which returns a + transport instance. Callables will be sent the credentials + as the first argument and the default transport class as + the second argument. + channel (grpc.Channel): DEPRECATED. A ``Channel`` instance + through which to make calls. This argument is mutually exclusive + with ``credentials``; providing both will raise an exception. + credentials (google.auth.credentials.Credentials): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is mutually exclusive with providing a + transport instance to ``transport``; doing so will raise + an exception. + client_config (dict): DEPRECATED. A dictionary of call options for + each method. If not specified, the default configuration is used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + client_options (Union[dict, google.api_core.client_options.ClientOptions]): + Client options used to set user options on the client. API Endpoint + should be set through client_options. + """ + # Raise deprecation warnings for things we want to go away. 
+ if client_config is not None: + warnings.warn( + "The `client_config` argument is deprecated.", + PendingDeprecationWarning, + stacklevel=2, + ) + else: + client_config = bigtable_table_admin_client_config.config + + if channel: + warnings.warn( + "The `channel` argument is deprecated; use " "`transport` instead.", + PendingDeprecationWarning, + stacklevel=2, + ) + + api_endpoint = self.SERVICE_ADDRESS + if client_options: + if type(client_options) == dict: + client_options = google.api_core.client_options.from_dict( + client_options + ) + if client_options.api_endpoint: + api_endpoint = client_options.api_endpoint + + # Instantiate the transport. + # The transport is responsible for handling serialization and + # deserialization and actually sending data to the service. + if transport: + if callable(transport): + self.transport = transport( + credentials=credentials, + default_class=bigtable_table_admin_grpc_transport.BigtableTableAdminGrpcTransport, + address=api_endpoint, + ) + else: + if credentials: + raise ValueError( + "Received both a transport instance and " + "credentials; these are mutually exclusive." + ) + self.transport = transport + else: + self.transport = bigtable_table_admin_grpc_transport.BigtableTableAdminGrpcTransport( + address=api_endpoint, channel=channel, credentials=credentials, + ) + + if client_info is None: + client_info = google.api_core.gapic_v1.client_info.ClientInfo( + gapic_version=_GAPIC_LIBRARY_VERSION, + ) + else: + client_info.gapic_version = _GAPIC_LIBRARY_VERSION + self._client_info = client_info + + # Parse out the default settings for retry and timeout for each RPC + # from the client configuration. + # (Ordinarily, these are the defaults specified in the `*_config.py` + # file next to this one.) + self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( + client_config["interfaces"][self._INTERFACE_NAME], + ) + + # Save a dictionary of cached API call functions. 
+ # These are the actual callables which invoke the proper + # transport methods, wrapped with `wrap_method` to add retry, + # timeout, and the like. + self._inner_api_calls = {} + + # Service calls + def create_table( + self, + parent, + table_id, + table, + initial_splits=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Creates a new table in the specified instance. + The table can be created with a full set of initial column families, + specified in the request. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableTableAdminClient() + >>> + >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') + >>> + >>> # TODO: Initialize `table_id`: + >>> table_id = '' + >>> + >>> # TODO: Initialize `table`: + >>> table = {} + >>> + >>> response = client.create_table(parent, table_id, table) + + Args: + parent (str): Required. The unique name of the instance in which to create the + table. Values are of the form + ``projects/{project}/instances/{instance}``. + table_id (str): Required. The name by which the new table should be referred to + within the parent instance, e.g., ``foobar`` rather than + ``{parent}/tables/foobar``. Maximum 50 characters. + table (Union[dict, ~google.cloud.bigtable_admin_v2.types.Table]): Required. The Table to create. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_admin_v2.types.Table` + initial_splits (list[Union[dict, ~google.cloud.bigtable_admin_v2.types.Split]]): The optional list of row keys that will be used to initially split + the table into several tablets (tablets are similar to HBase regions). + Given two split keys, ``s1`` and ``s2``, three tablets will be created, + spanning the key ranges: ``[, s1), [s1, s2), [s2, )``. 
+ + Example: + + - Row keys := ``["a", "apple", "custom", "customer_1", "customer_2",`` + ``"other", "zz"]`` + - initial_split_keys := + ``["apple", "customer_1", "customer_2", "other"]`` + - Key assignment: + + - Tablet 1 ``[, apple) => {"a"}.`` + - Tablet 2 ``[apple, customer_1) => {"apple", "custom"}.`` + - Tablet 3 ``[customer_1, customer_2) => {"customer_1"}.`` + - Tablet 4 ``[customer_2, other) => {"customer_2"}.`` + - Tablet 5 ``[other, ) => {"other", "zz"}.`` + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_admin_v2.types.Split` + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types.Table` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. 
+ if "create_table" not in self._inner_api_calls: + self._inner_api_calls[ + "create_table" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.create_table, + default_retry=self._method_configs["CreateTable"].retry, + default_timeout=self._method_configs["CreateTable"].timeout, + client_info=self._client_info, + ) + + request = bigtable_table_admin_pb2.CreateTableRequest( + parent=parent, + table_id=table_id, + table=table, + initial_splits=initial_splits, + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("parent", parent)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + return self._inner_api_calls["create_table"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def create_table_from_snapshot( + self, + parent, + table_id, + source_snapshot, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Creates a new table from the specified snapshot. The target table must + not exist. The snapshot and the table must be in the same instance. + + Note: This is a private alpha release of Cloud Bigtable snapshots. This + feature is not currently available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or deprecation + policy. 
+ + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableTableAdminClient() + >>> + >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') + >>> + >>> # TODO: Initialize `table_id`: + >>> table_id = '' + >>> source_snapshot = client.snapshot_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]', '[SNAPSHOT]') + >>> + >>> response = client.create_table_from_snapshot(parent, table_id, source_snapshot) + >>> + >>> def callback(operation_future): + ... # Handle result. + ... result = operation_future.result() + >>> + >>> response.add_done_callback(callback) + >>> + >>> # Handle metadata. + >>> metadata = response.metadata() + + Args: + parent (str): Required. The unique name of the instance in which to create the + table. Values are of the form + ``projects/{project}/instances/{instance}``. + table_id (str): Required. The name by which the new table should be referred to + within the parent instance, e.g., ``foobar`` rather than + ``{parent}/tables/foobar``. + source_snapshot (str): Required. The unique name of the snapshot from which to restore the + table. The snapshot and the table must be in the same instance. Values + are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. 
+ google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. + if "create_table_from_snapshot" not in self._inner_api_calls: + self._inner_api_calls[ + "create_table_from_snapshot" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.create_table_from_snapshot, + default_retry=self._method_configs["CreateTableFromSnapshot"].retry, + default_timeout=self._method_configs["CreateTableFromSnapshot"].timeout, + client_info=self._client_info, + ) + + request = bigtable_table_admin_pb2.CreateTableFromSnapshotRequest( + parent=parent, table_id=table_id, source_snapshot=source_snapshot, + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("parent", parent)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + operation = self._inner_api_calls["create_table_from_snapshot"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + return google.api_core.operation.from_gapic( + operation, + self.transport._operations_client, + table_pb2.Table, + metadata_type=bigtable_table_admin_pb2.CreateTableFromSnapshotMetadata, + ) + + def list_tables( + self, + parent, + view=None, + page_size=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Lists all tables served from a specified instance. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableTableAdminClient() + >>> + >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') + >>> + >>> # Iterate over all results + >>> for element in client.list_tables(parent): + ... # process element + ... 
pass + >>> + >>> + >>> # Alternatively: + >>> + >>> # Iterate over results one page at a time + >>> for page in client.list_tables(parent).pages: + ... for element in page: + ... # process element + ... pass + + Args: + parent (str): Required. The unique name of the instance for which tables should be + listed. Values are of the form + ``projects/{project}/instances/{instance}``. + view (~google.cloud.bigtable_admin_v2.types.View): The view to be applied to the returned tables' fields. Only + NAME_ONLY view (default) and REPLICATION_VIEW are supported. + page_size (int): The maximum number of resources contained in the + underlying API response. If page streaming is performed per- + resource, this parameter does not affect the return value. If page + streaming is performed per-page, this determines the maximum number + of resources in a page. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.api_core.page_iterator.PageIterator` instance. + An iterable of :class:`~google.cloud.bigtable_admin_v2.types.Table` instances. + You can also iterate over the pages of the response + using its `pages` property. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. 
+ if "list_tables" not in self._inner_api_calls: + self._inner_api_calls[ + "list_tables" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.list_tables, + default_retry=self._method_configs["ListTables"].retry, + default_timeout=self._method_configs["ListTables"].timeout, + client_info=self._client_info, + ) + + request = bigtable_table_admin_pb2.ListTablesRequest( + parent=parent, view=view, page_size=page_size, + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("parent", parent)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + iterator = google.api_core.page_iterator.GRPCIterator( + client=None, + method=functools.partial( + self._inner_api_calls["list_tables"], + retry=retry, + timeout=timeout, + metadata=metadata, + ), + request=request, + items_field="tables", + request_token_field="page_token", + response_token_field="next_page_token", + ) + return iterator + + def get_table( + self, + name, + view=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Gets metadata information about the specified table. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableTableAdminClient() + >>> + >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + >>> + >>> response = client.get_table(name) + + Args: + name (str): Required. The unique name of the requested table. Values are of the + form ``projects/{project}/instances/{instance}/tables/{table}``. + view (~google.cloud.bigtable_admin_v2.types.View): The view to be applied to the returned table's fields. Defaults to + ``SCHEMA_VIEW`` if unspecified. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. 
If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types.Table` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. + if "get_table" not in self._inner_api_calls: + self._inner_api_calls[ + "get_table" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.get_table, + default_retry=self._method_configs["GetTable"].retry, + default_timeout=self._method_configs["GetTable"].timeout, + client_info=self._client_info, + ) + + request = bigtable_table_admin_pb2.GetTableRequest(name=name, view=view,) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("name", name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + return self._inner_api_calls["get_table"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def delete_table( + self, + name, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Permanently deletes a specified table and all of its data. 
+ + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableTableAdminClient() + >>> + >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + >>> + >>> client.delete_table(name) + + Args: + name (str): Required. The unique name of the table to be deleted. Values are of + the form ``projects/{project}/instances/{instance}/tables/{table}``. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. 
+ if "delete_table" not in self._inner_api_calls: + self._inner_api_calls[ + "delete_table" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.delete_table, + default_retry=self._method_configs["DeleteTable"].retry, + default_timeout=self._method_configs["DeleteTable"].timeout, + client_info=self._client_info, + ) + + request = bigtable_table_admin_pb2.DeleteTableRequest(name=name,) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("name", name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + self._inner_api_calls["delete_table"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def modify_column_families( + self, + name, + modifications, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Performs a series of column family modifications on the specified table. + Either all or none of the modifications will occur before this method + returns, but data requests received prior to that point may see a table + where only some modifications have taken effect. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableTableAdminClient() + >>> + >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + >>> + >>> # TODO: Initialize `modifications`: + >>> modifications = [] + >>> + >>> response = client.modify_column_families(name, modifications) + + Args: + name (str): Required. The unique name of the table whose families should be + modified. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + modifications (list[Union[dict, ~google.cloud.bigtable_admin_v2.types.Modification]]): Required. Modifications to be atomically applied to the specified table's + families. 
Entries are applied in order, meaning that earlier modifications + can be masked by later ones (in the case of repeated updates to the same + family, for example). + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_admin_v2.types.Modification` + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types.Table` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. 
+ if "modify_column_families" not in self._inner_api_calls: + self._inner_api_calls[ + "modify_column_families" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.modify_column_families, + default_retry=self._method_configs["ModifyColumnFamilies"].retry, + default_timeout=self._method_configs["ModifyColumnFamilies"].timeout, + client_info=self._client_info, + ) + + request = bigtable_table_admin_pb2.ModifyColumnFamiliesRequest( + name=name, modifications=modifications, + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("name", name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + return self._inner_api_calls["modify_column_families"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def drop_row_range( + self, + name, + row_key_prefix=None, + delete_all_data_from_table=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Permanently drop/delete a row range from a specified table. The request can + specify whether to delete all rows in a table, or only those that match a + particular prefix. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableTableAdminClient() + >>> + >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + >>> + >>> client.drop_row_range(name) + + Args: + name (str): Required. The unique name of the table on which to drop a range of + rows. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + row_key_prefix (bytes): Delete all rows that start with this row key prefix. Prefix cannot be + zero length. + delete_all_data_from_table (bool): Delete all rows in the table. Setting this to false is a no-op. 
+ retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. + if "drop_row_range" not in self._inner_api_calls: + self._inner_api_calls[ + "drop_row_range" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.drop_row_range, + default_retry=self._method_configs["DropRowRange"].retry, + default_timeout=self._method_configs["DropRowRange"].timeout, + client_info=self._client_info, + ) + + # Sanity check: We have some fields which are mutually exclusive; + # raise ValueError if more than one is sent. 
+ google.api_core.protobuf_helpers.check_oneof( + row_key_prefix=row_key_prefix, + delete_all_data_from_table=delete_all_data_from_table, + ) + + request = bigtable_table_admin_pb2.DropRowRangeRequest( + name=name, + row_key_prefix=row_key_prefix, + delete_all_data_from_table=delete_all_data_from_table, + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("name", name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + self._inner_api_calls["drop_row_range"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def generate_consistency_token( + self, + name, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Generates a consistency token for a Table, which can be used in + CheckConsistency to check whether mutations to the table that finished + before this call started have been replicated. The tokens will be available + for 90 days. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableTableAdminClient() + >>> + >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + >>> + >>> response = client.generate_consistency_token(name) + + Args: + name (str): Required. The unique name of the Table for which to create a + consistency token. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. 
+ metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenResponse` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. + if "generate_consistency_token" not in self._inner_api_calls: + self._inner_api_calls[ + "generate_consistency_token" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.generate_consistency_token, + default_retry=self._method_configs["GenerateConsistencyToken"].retry, + default_timeout=self._method_configs[ + "GenerateConsistencyToken" + ].timeout, + client_info=self._client_info, + ) + + request = bigtable_table_admin_pb2.GenerateConsistencyTokenRequest(name=name,) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("name", name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + return self._inner_api_calls["generate_consistency_token"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def check_consistency( + self, + name, + consistency_token, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Checks replication consistency based on a consistency token, that is, if + replication has caught up based on the conditions specified in the token + and the check request. 
+ + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableTableAdminClient() + >>> + >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + >>> + >>> # TODO: Initialize `consistency_token`: + >>> consistency_token = '' + >>> + >>> response = client.check_consistency(name, consistency_token) + + Args: + name (str): Required. The unique name of the Table for which to check + replication consistency. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + consistency_token (str): Required. The token created using GenerateConsistencyToken for the Table. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types.CheckConsistencyResponse` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. 
+ if "check_consistency" not in self._inner_api_calls: + self._inner_api_calls[ + "check_consistency" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.check_consistency, + default_retry=self._method_configs["CheckConsistency"].retry, + default_timeout=self._method_configs["CheckConsistency"].timeout, + client_info=self._client_info, + ) + + request = bigtable_table_admin_pb2.CheckConsistencyRequest( + name=name, consistency_token=consistency_token, + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("name", name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + return self._inner_api_calls["check_consistency"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def get_iam_policy( + self, + resource, + options_=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Gets the access control policy for a resource. + Returns an empty policy if the resource exists but does not have a policy + set. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableTableAdminClient() + >>> + >>> # TODO: Initialize `resource`: + >>> resource = '' + >>> + >>> response = client.get_iam_policy(resource) + + Args: + resource (str): REQUIRED: The resource for which the policy is being requested. + See the operation documentation for the appropriate value for this field. + options_ (Union[dict, ~google.cloud.bigtable_admin_v2.types.GetPolicyOptions]): OPTIONAL: A ``GetPolicyOptions`` object for specifying options to + ``GetIamPolicy``. This field is only used by Cloud IAM. 
+ + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_admin_v2.types.GetPolicyOptions` + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types.Policy` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. 
+ if "get_iam_policy" not in self._inner_api_calls: + self._inner_api_calls[ + "get_iam_policy" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.get_iam_policy, + default_retry=self._method_configs["GetIamPolicy"].retry, + default_timeout=self._method_configs["GetIamPolicy"].timeout, + client_info=self._client_info, + ) + + request = iam_policy_pb2.GetIamPolicyRequest( + resource=resource, options=options_, + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("resource", resource)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + return self._inner_api_calls["get_iam_policy"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def set_iam_policy( + self, + resource, + policy, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Sets the access control policy on a Table or Backup resource. + Replaces any existing policy. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableTableAdminClient() + >>> + >>> # TODO: Initialize `resource`: + >>> resource = '' + >>> + >>> # TODO: Initialize `policy`: + >>> policy = {} + >>> + >>> response = client.set_iam_policy(resource, policy) + + Args: + resource (str): REQUIRED: The resource for which the policy is being specified. + See the operation documentation for the appropriate value for this field. + policy (Union[dict, ~google.cloud.bigtable_admin_v2.types.Policy]): REQUIRED: The complete policy to be applied to the ``resource``. The + size of the policy is limited to a few 10s of KB. An empty policy is a + valid policy but certain Cloud Platform services (such as Projects) + might reject them. 
+ + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_admin_v2.types.Policy` + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types.Policy` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. 
+ if "set_iam_policy" not in self._inner_api_calls: + self._inner_api_calls[ + "set_iam_policy" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.set_iam_policy, + default_retry=self._method_configs["SetIamPolicy"].retry, + default_timeout=self._method_configs["SetIamPolicy"].timeout, + client_info=self._client_info, + ) + + request = iam_policy_pb2.SetIamPolicyRequest(resource=resource, policy=policy,) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("resource", resource)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + return self._inner_api_calls["set_iam_policy"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def test_iam_permissions( + self, + resource, + permissions, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Returns permissions that the caller has on the specified table resource. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableTableAdminClient() + >>> + >>> # TODO: Initialize `resource`: + >>> resource = '' + >>> + >>> # TODO: Initialize `permissions`: + >>> permissions = [] + >>> + >>> response = client.test_iam_permissions(resource, permissions) + + Args: + resource (str): REQUIRED: The resource for which the policy detail is being requested. + See the operation documentation for the appropriate value for this field. + permissions (list[str]): The set of permissions to check for the ``resource``. Permissions + with wildcards (such as '*' or 'storage.*') are not allowed. For more + information see `IAM + Overview `__. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. 
+ timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types.TestIamPermissionsResponse` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. + if "test_iam_permissions" not in self._inner_api_calls: + self._inner_api_calls[ + "test_iam_permissions" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.test_iam_permissions, + default_retry=self._method_configs["TestIamPermissions"].retry, + default_timeout=self._method_configs["TestIamPermissions"].timeout, + client_info=self._client_info, + ) + + request = iam_policy_pb2.TestIamPermissionsRequest( + resource=resource, permissions=permissions, + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("resource", resource)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + return self._inner_api_calls["test_iam_permissions"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def snapshot_table( + self, + name, + cluster, + snapshot_id, + ttl=None, + description=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Creates a new snapshot in the specified cluster from the specified + source table. The cluster and the table must be in the same instance. 
+ + Note: This is a private alpha release of Cloud Bigtable snapshots. This + feature is not currently available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or deprecation + policy. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableTableAdminClient() + >>> + >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + >>> cluster = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') + >>> + >>> # TODO: Initialize `snapshot_id`: + >>> snapshot_id = '' + >>> + >>> response = client.snapshot_table(name, cluster, snapshot_id) + >>> + >>> def callback(operation_future): + ... # Handle result. + ... result = operation_future.result() + >>> + >>> response.add_done_callback(callback) + >>> + >>> # Handle metadata. + >>> metadata = response.metadata() + + Args: + name (str): Required. The unique name of the table to have the snapshot taken. + Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + cluster (str): Required. The name of the cluster where the snapshot will be created + in. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + snapshot_id (str): Required. The ID by which the new snapshot should be referred to + within the parent cluster, e.g., ``mysnapshot`` of the form: + ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` rather than + ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/mysnapshot``. + ttl (Union[dict, ~google.cloud.bigtable_admin_v2.types.Duration]): The amount of time that the new snapshot can stay active after it is + created. Once 'ttl' expires, the snapshot will get deleted. The maximum + amount of time a snapshot can stay active is 7 days. If 'ttl' is not + specified, the default value of 24 hours will be used. 
+ + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_admin_v2.types.Duration` + description (str): Description of the snapshot. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. 
+ if "snapshot_table" not in self._inner_api_calls: + self._inner_api_calls[ + "snapshot_table" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.snapshot_table, + default_retry=self._method_configs["SnapshotTable"].retry, + default_timeout=self._method_configs["SnapshotTable"].timeout, + client_info=self._client_info, + ) + + request = bigtable_table_admin_pb2.SnapshotTableRequest( + name=name, + cluster=cluster, + snapshot_id=snapshot_id, + ttl=ttl, + description=description, + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("name", name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + operation = self._inner_api_calls["snapshot_table"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + return google.api_core.operation.from_gapic( + operation, + self.transport._operations_client, + table_pb2.Snapshot, + metadata_type=bigtable_table_admin_pb2.SnapshotTableMetadata, + ) + + def get_snapshot( + self, + name, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Gets metadata information about the specified snapshot. + + Note: This is a private alpha release of Cloud Bigtable snapshots. This + feature is not currently available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or deprecation + policy. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableTableAdminClient() + >>> + >>> name = client.snapshot_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]', '[SNAPSHOT]') + >>> + >>> response = client.get_snapshot(name) + + Args: + name (str): Required. The unique name of the requested snapshot. 
Values are of + the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types.Snapshot` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. + if "get_snapshot" not in self._inner_api_calls: + self._inner_api_calls[ + "get_snapshot" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.get_snapshot, + default_retry=self._method_configs["GetSnapshot"].retry, + default_timeout=self._method_configs["GetSnapshot"].timeout, + client_info=self._client_info, + ) + + request = bigtable_table_admin_pb2.GetSnapshotRequest(name=name,) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("name", name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + return self._inner_api_calls["get_snapshot"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def list_snapshots( + self, + parent, + page_size=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Lists all 
snapshots associated with the specified cluster. + + Note: This is a private alpha release of Cloud Bigtable snapshots. This + feature is not currently available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or deprecation + policy. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableTableAdminClient() + >>> + >>> parent = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') + >>> + >>> # Iterate over all results + >>> for element in client.list_snapshots(parent): + ... # process element + ... pass + >>> + >>> + >>> # Alternatively: + >>> + >>> # Iterate over results one page at a time + >>> for page in client.list_snapshots(parent).pages: + ... for element in page: + ... # process element + ... pass + + Args: + parent (str): Required. The unique name of the cluster for which snapshots should + be listed. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. Use + ``{cluster} = '-'`` to list snapshots for all clusters in an instance, + e.g., ``projects/{project}/instances/{instance}/clusters/-``. + page_size (int): The maximum number of resources contained in the + underlying API response. If page streaming is performed per- + resource, this parameter does not affect the return value. If page + streaming is performed per-page, this determines the maximum number + of resources in a page. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. 
+ + Returns: + A :class:`~google.api_core.page_iterator.PageIterator` instance. + An iterable of :class:`~google.cloud.bigtable_admin_v2.types.Snapshot` instances. + You can also iterate over the pages of the response + using its `pages` property. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. + if "list_snapshots" not in self._inner_api_calls: + self._inner_api_calls[ + "list_snapshots" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.list_snapshots, + default_retry=self._method_configs["ListSnapshots"].retry, + default_timeout=self._method_configs["ListSnapshots"].timeout, + client_info=self._client_info, + ) + + request = bigtable_table_admin_pb2.ListSnapshotsRequest( + parent=parent, page_size=page_size, + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("parent", parent)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + iterator = google.api_core.page_iterator.GRPCIterator( + client=None, + method=functools.partial( + self._inner_api_calls["list_snapshots"], + retry=retry, + timeout=timeout, + metadata=metadata, + ), + request=request, + items_field="snapshots", + request_token_field="page_token", + response_token_field="next_page_token", + ) + return iterator + + def delete_snapshot( + self, + name, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Permanently deletes the specified snapshot. + + Note: This is a private alpha release of Cloud Bigtable snapshots. 
This + feature is not currently available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or deprecation + policy. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableTableAdminClient() + >>> + >>> name = client.snapshot_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]', '[SNAPSHOT]') + >>> + >>> client.delete_snapshot(name) + + Args: + name (str): Required. The unique name of the snapshot to be deleted. Values are + of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. 
+ if "delete_snapshot" not in self._inner_api_calls: + self._inner_api_calls[ + "delete_snapshot" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.delete_snapshot, + default_retry=self._method_configs["DeleteSnapshot"].retry, + default_timeout=self._method_configs["DeleteSnapshot"].timeout, + client_info=self._client_info, + ) + + request = bigtable_table_admin_pb2.DeleteSnapshotRequest(name=name,) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("name", name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + self._inner_api_calls["delete_snapshot"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def create_backup( + self, + parent, + backup_id, + backup, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Starts creating a new Cloud Bigtable Backup. The returned backup + ``long-running operation`` can be used to track creation of the backup. + The ``metadata`` field type is ``CreateBackupMetadata``. The + ``response`` field type is ``Backup``, if successful. Cancelling the + returned operation will stop the creation and delete the backup. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableTableAdminClient() + >>> + >>> parent = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') + >>> + >>> # TODO: Initialize `backup_id`: + >>> backup_id = '' + >>> + >>> # TODO: Initialize `backup`: + >>> backup = {} + >>> + >>> response = client.create_backup(parent, backup_id, backup) + >>> + >>> def callback(operation_future): + ... # Handle result. + ... result = operation_future.result() + >>> + >>> response.add_done_callback(callback) + >>> + >>> # Handle metadata. 
+ >>> metadata = response.metadata()
+
+ Args:
+ parent (str): Required. This must be one of the clusters in the instance in which
+ this table is located. The backup will be stored in this cluster. Values
+ are of the form
+ ``projects/{project}/instances/{instance}/clusters/{cluster}``.
+ backup_id (str): Required. The id of the backup to be created. The ``backup_id``
+ along with the parent ``parent`` are combined as
+ {parent}/backups/{backup_id} to create the full backup name, of the
+ form:
+ ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``.
+ This string must be between 1 and 50 characters in length and match the
+ regex ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
+ backup (Union[dict, ~google.cloud.bigtable_admin_v2.types.Backup]): Required. The backup to create.
+
+ If a dict is provided, it must be of the same form as the protobuf
+ message :class:`~google.cloud.bigtable_admin_v2.types.Backup`
+ retry (Optional[google.api_core.retry.Retry]): A retry object used
+ to retry requests. If ``None`` is specified, requests will
+ be retried using a default configuration.
+ timeout (Optional[float]): The amount of time, in seconds, to wait
+ for the request to complete. Note that if ``retry`` is
+ specified, the timeout applies to each individual attempt.
+ metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
+ that is provided to the method.
+
+ Returns:
+ A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance.
+
+ Raises:
+ google.api_core.exceptions.GoogleAPICallError: If the request
+ failed for any reason.
+ google.api_core.exceptions.RetryError: If the request failed due
+ to a retryable error and retry attempts failed.
+ ValueError: If the parameters are invalid.
+ """
+ # Wrap the transport method to add retry and timeout logic.
+ if "create_backup" not in self._inner_api_calls: + self._inner_api_calls[ + "create_backup" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.create_backup, + default_retry=self._method_configs["CreateBackup"].retry, + default_timeout=self._method_configs["CreateBackup"].timeout, + client_info=self._client_info, + ) + + request = bigtable_table_admin_pb2.CreateBackupRequest( + parent=parent, backup_id=backup_id, backup=backup, + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("parent", parent)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + operation = self._inner_api_calls["create_backup"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + return google.api_core.operation.from_gapic( + operation, + self.transport._operations_client, + table_pb2.Backup, + metadata_type=bigtable_table_admin_pb2.CreateBackupMetadata, + ) + + def get_backup( + self, + name, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Gets metadata on a pending or completed Cloud Bigtable Backup. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableTableAdminClient() + >>> + >>> name = client.backup_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]', '[BACKUP]') + >>> + >>> response = client.get_backup(name) + + Args: + name (str): Required. Name of the backup. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. 
Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types.Backup` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. + if "get_backup" not in self._inner_api_calls: + self._inner_api_calls[ + "get_backup" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.get_backup, + default_retry=self._method_configs["GetBackup"].retry, + default_timeout=self._method_configs["GetBackup"].timeout, + client_info=self._client_info, + ) + + request = bigtable_table_admin_pb2.GetBackupRequest(name=name,) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("name", name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + return self._inner_api_calls["get_backup"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def list_backups( + self, + parent, + filter_=None, + order_by=None, + page_size=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Lists Cloud Bigtable backups. Returns both completed and pending + backups. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableTableAdminClient() + >>> + >>> parent = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') + >>> + >>> # Iterate over all results + >>> for element in client.list_backups(parent): + ... 
# process element + ... pass + >>> + >>> + >>> # Alternatively: + >>> + >>> # Iterate over results one page at a time + >>> for page in client.list_backups(parent).pages: + ... for element in page: + ... # process element + ... pass + + Args: + parent (str): Required. The cluster to list backups from. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. Use + ``{cluster} = '-'`` to list backups for all clusters in an instance, + e.g., ``projects/{project}/instances/{instance}/clusters/-``. + filter_ (str): A filter expression that filters backups listed in the response. The + expression must specify the field name, a comparison operator, and the + value that you want to use for filtering. The value must be a string, a + number, or a boolean. The comparison operator must be <, >, <=, >=, !=, + =, or :. Colon ‘:’ represents a HAS operator which is roughly synonymous + with equality. Filter rules are case insensitive. + + The fields eligible for filtering are: + + - ``name`` + - ``source_table`` + - ``state`` + - ``start_time`` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) + - ``end_time`` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) + - ``expire_time`` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) + - ``size_bytes`` + + To filter on multiple expressions, provide each separate expression + within parentheses. By default, each expression is an AND expression. + However, you can include AND, OR, and NOT expressions explicitly. + + Some examples of using filters are: + + - ``name:"exact"`` --> The backup's name is the string "exact". + - ``name:howl`` --> The backup's name contains the string "howl". + - ``source_table:prod`` --> The source_table's name contains the string + "prod". + - ``state:CREATING`` --> The backup is pending creation. + - ``state:READY`` --> The backup is fully created and ready for use. 
+ - ``(name:howl) AND (start_time < \"2018-03-28T14:50:00Z\")`` --> The
+ backup name contains the string "howl" and start_time of the backup
+ is before 2018-03-28T14:50:00Z.
+ - ``size_bytes > 10000000000`` --> The backup's size is greater than
+ 10GB
+ order_by (str): An expression for specifying the sort order of the results of the
+ request. The string value should specify one or more fields in
+ ``Backup``. The full syntax is described at
+ https://aip.dev/132#ordering.
+
+ Fields supported are: \* name \* source_table \* expire_time \*
+ start_time \* end_time \* size_bytes \* state
+
+ For example, "start_time". The default sorting order is ascending. To
+ specify descending order for the field, a suffix " desc" should be
+ appended to the field name. For example, "start_time desc". Redundant
+ space characters in the syntax are insignificant.
+
+ If order_by is empty, results will be sorted by ``start_time`` in
+ descending order starting from the most recently created backup.
+ page_size (int): The maximum number of resources contained in the
+ underlying API response. If page streaming is performed per-
+ resource, this parameter does not affect the return value. If page
+ streaming is performed per-page, this determines the maximum number
+ of resources in a page.
+ retry (Optional[google.api_core.retry.Retry]): A retry object used
+ to retry requests. If ``None`` is specified, requests will
+ be retried using a default configuration.
+ timeout (Optional[float]): The amount of time, in seconds, to wait
+ for the request to complete. Note that if ``retry`` is
+ specified, the timeout applies to each individual attempt.
+ metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
+ that is provided to the method.
+
+ Returns:
+ A :class:`~google.api_core.page_iterator.PageIterator` instance.
+ An iterable of :class:`~google.cloud.bigtable_admin_v2.types.Backup` instances.
+ You can also iterate over the pages of the response + using its `pages` property. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. + if "list_backups" not in self._inner_api_calls: + self._inner_api_calls[ + "list_backups" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.list_backups, + default_retry=self._method_configs["ListBackups"].retry, + default_timeout=self._method_configs["ListBackups"].timeout, + client_info=self._client_info, + ) + + request = bigtable_table_admin_pb2.ListBackupsRequest( + parent=parent, filter=filter_, order_by=order_by, page_size=page_size, + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("parent", parent)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + iterator = google.api_core.page_iterator.GRPCIterator( + client=None, + method=functools.partial( + self._inner_api_calls["list_backups"], + retry=retry, + timeout=timeout, + metadata=metadata, + ), + request=request, + items_field="backups", + request_token_field="page_token", + response_token_field="next_page_token", + ) + return iterator + + def update_backup( + self, + backup, + update_mask, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Updates a pending or completed Cloud Bigtable Backup. 
+ + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableTableAdminClient() + >>> + >>> # TODO: Initialize `backup`: + >>> backup = {} + >>> + >>> # TODO: Initialize `update_mask`: + >>> update_mask = {} + >>> + >>> response = client.update_backup(backup, update_mask) + + Args: + backup (Union[dict, ~google.cloud.bigtable_admin_v2.types.Backup]): Required. The backup to update. ``backup.name``, and the fields to + be updated as specified by ``update_mask`` are required. Other fields + are ignored. Update is only supported for the following fields: + + - ``backup.expire_time``. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_admin_v2.types.Backup` + update_mask (Union[dict, ~google.cloud.bigtable_admin_v2.types.FieldMask]): Required. A mask specifying which fields (e.g. ``expire_time``) in + the Backup resource should be updated. This mask is relative to the + Backup resource, not to the request message. The field mask must always + be specified; this prevents any future fields from being erased + accidentally by clients that do not know about them. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_admin_v2.types.FieldMask` + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types.Backup` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. 
+ google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. + if "update_backup" not in self._inner_api_calls: + self._inner_api_calls[ + "update_backup" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.update_backup, + default_retry=self._method_configs["UpdateBackup"].retry, + default_timeout=self._method_configs["UpdateBackup"].timeout, + client_info=self._client_info, + ) + + request = bigtable_table_admin_pb2.UpdateBackupRequest( + backup=backup, update_mask=update_mask, + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("backup.name", backup.name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + return self._inner_api_calls["update_backup"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def delete_backup( + self, + name, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Deletes a pending or completed Cloud Bigtable backup. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableTableAdminClient() + >>> + >>> name = client.backup_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]', '[BACKUP]') + >>> + >>> client.delete_backup(name) + + Args: + name (str): Required. Name of the backup to delete. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. 
+ timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. + if "delete_backup" not in self._inner_api_calls: + self._inner_api_calls[ + "delete_backup" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.delete_backup, + default_retry=self._method_configs["DeleteBackup"].retry, + default_timeout=self._method_configs["DeleteBackup"].timeout, + client_info=self._client_info, + ) + + request = bigtable_table_admin_pb2.DeleteBackupRequest(name=name,) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("name", name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + self._inner_api_calls["delete_backup"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def restore_table( + self, + parent=None, + table_id=None, + backup=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Create a new table by restoring from a completed backup. The new + table must be in the same instance as the instance containing the + backup. The returned table ``long-running operation`` can be used to + track the progress of the operation, and to cancel it. The ``metadata`` + field type is ``RestoreTableMetadata``. 
The ``response`` type is
+ ``Table``, if successful.
+
+ Example:
+ >>> from google.cloud import bigtable_admin_v2
+ >>>
+ >>> client = bigtable_admin_v2.BigtableTableAdminClient()
+ >>>
+ >>> response = client.restore_table()
+ >>>
+ >>> def callback(operation_future):
+ ... # Handle result.
+ ... result = operation_future.result()
+ >>>
+ >>> response.add_done_callback(callback)
+ >>>
+ >>> # Handle metadata.
+ >>> metadata = response.metadata()
+
+ Args:
+ parent (str): Required. The name of the instance in which to create the restored
+ table. This instance must be the parent of the source backup. Values are
+ of the form ``projects/{project}/instances/{instance}``.
+ table_id (str): Required. The id of the table to create and restore to. This table
+ must not already exist. The ``table_id`` appended to ``parent`` forms
+ the full table name of the form
+ ``projects/{project}/instances/{instance}/tables/{table_id}``.
+ backup (str): Name of the backup from which to restore. Values are of the form
+ ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``.
+ retry (Optional[google.api_core.retry.Retry]): A retry object used
+ to retry requests. If ``None`` is specified, requests will
+ be retried using a default configuration.
+ timeout (Optional[float]): The amount of time, in seconds, to wait
+ for the request to complete. Note that if ``retry`` is
+ specified, the timeout applies to each individual attempt.
+ metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
+ that is provided to the method.
+
+ Returns:
+ A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance.
+
+ Raises:
+ google.api_core.exceptions.GoogleAPICallError: If the request
+ failed for any reason.
+ google.api_core.exceptions.RetryError: If the request failed due
+ to a retryable error and retry attempts failed.
+ ValueError: If the parameters are invalid.
+ """
+ # Wrap the transport method to add retry and timeout logic.
+ if "restore_table" not in self._inner_api_calls: + self._inner_api_calls[ + "restore_table" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.restore_table, + default_retry=self._method_configs["RestoreTable"].retry, + default_timeout=self._method_configs["RestoreTable"].timeout, + client_info=self._client_info, + ) + + # Sanity check: We have some fields which are mutually exclusive; + # raise ValueError if more than one is sent. + google.api_core.protobuf_helpers.check_oneof(backup=backup,) + + request = bigtable_table_admin_pb2.RestoreTableRequest( + parent=parent, table_id=table_id, backup=backup, + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("parent", parent)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + operation = self._inner_api_calls["restore_table"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + return google.api_core.operation.from_gapic( + operation, + self.transport._operations_client, + table_pb2.Table, + metadata_type=bigtable_table_admin_pb2.RestoreTableMetadata, + ) diff --git a/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py b/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py new file mode 100644 index 000000000..db60047bd --- /dev/null +++ b/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py @@ -0,0 +1,160 @@ +config = { + "interfaces": { + "google.bigtable.admin.v2.BigtableTableAdmin": { + "retry_codes": { + "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], + "non_idempotent": [], + }, + "retry_params": { + "idempotent_params": { + "initial_retry_delay_millis": 1000, + "retry_delay_multiplier": 2.0, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 60000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 60000, + 
"total_timeout_millis": 600000, + }, + "non_idempotent_params": { + "initial_retry_delay_millis": 0, + "retry_delay_multiplier": 1.0, + "max_retry_delay_millis": 0, + "initial_rpc_timeout_millis": 60000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 60000, + "total_timeout_millis": 60000, + }, + "non_idempotent_heavy_params": { + "initial_retry_delay_millis": 0, + "retry_delay_multiplier": 1.0, + "max_retry_delay_millis": 0, + "initial_rpc_timeout_millis": 300000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 300000, + "total_timeout_millis": 300000, + }, + "drop_row_range_params": { + "initial_retry_delay_millis": 0, + "retry_delay_multiplier": 1.0, + "max_retry_delay_millis": 0, + "initial_rpc_timeout_millis": 3600000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 3600000, + "total_timeout_millis": 3600000, + }, + }, + "methods": { + "CreateTable": { + "timeout_millis": 130000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "non_idempotent_heavy_params", + }, + "CreateTableFromSnapshot": { + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "non_idempotent_params", + }, + "ListTables": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "idempotent_params", + }, + "GetTable": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "idempotent_params", + }, + "DeleteTable": { + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "non_idempotent_params", + }, + "ModifyColumnFamilies": { + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "non_idempotent_heavy_params", + }, + "DropRowRange": { + "timeout_millis": 900000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "drop_row_range_params", + }, + "GenerateConsistencyToken": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": 
"idempotent_params", + }, + "CheckConsistency": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "idempotent_params", + }, + "GetIamPolicy": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "idempotent_params", + }, + "SetIamPolicy": { + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "non_idempotent_params", + }, + "TestIamPermissions": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "idempotent_params", + }, + "SnapshotTable": { + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "non_idempotent_params", + }, + "GetSnapshot": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "idempotent_params", + }, + "ListSnapshots": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "idempotent_params", + }, + "DeleteSnapshot": { + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "non_idempotent_params", + }, + "CreateBackup": { + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "non_idempotent_params", + }, + "GetBackup": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "idempotent_params", + }, + "ListBackups": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "idempotent_params", + }, + "UpdateBackup": { + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "non_idempotent_params", + }, + "DeleteBackup": { + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "non_idempotent_params", + }, + "RestoreTable": { + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "non_idempotent_params", + }, + }, + } + } +} diff --git 
a/google/cloud/bigtable_admin_v2/gapic/enums.py b/google/cloud/bigtable_admin_v2/gapic/enums.py new file mode 100644 index 000000000..c71bee34b --- /dev/null +++ b/google/cloud/bigtable_admin_v2/gapic/enums.py @@ -0,0 +1,213 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Wrappers for protocol buffer enum types.""" + +import enum + + +class RestoreSourceType(enum.IntEnum): + """ + Indicates the type of the restore source. + + Attributes: + RESTORE_SOURCE_TYPE_UNSPECIFIED (int): No restore associated. + BACKUP (int): A backup was used as the source of the restore. + """ + + RESTORE_SOURCE_TYPE_UNSPECIFIED = 0 + BACKUP = 1 + + +class StorageType(enum.IntEnum): + """ + Storage media types for persisting Bigtable data. + + Attributes: + STORAGE_TYPE_UNSPECIFIED (int): The user did not specify a storage type. + SSD (int): Flash (SSD) storage should be used. + HDD (int): Magnetic drive (HDD) storage should be used. + """ + + STORAGE_TYPE_UNSPECIFIED = 0 + SSD = 1 + HDD = 2 + + +class Backup(object): + class State(enum.IntEnum): + """ + Indicates the current state of the backup. + + Attributes: + STATE_UNSPECIFIED (int): Not specified. + CREATING (int): The pending backup is still being created. Operations on the backup + may fail with ``FAILED_PRECONDITION`` in this state. + READY (int): The backup is complete and ready for use. 
+ """ + + STATE_UNSPECIFIED = 0 + CREATING = 1 + READY = 2 + + +class Cluster(object): + class State(enum.IntEnum): + """ + Possible states of a cluster. + + Attributes: + STATE_NOT_KNOWN (int): The state of the cluster could not be determined. + READY (int): The cluster has been successfully created and is ready to serve requests. + CREATING (int): The cluster is currently being created, and may be destroyed + if the creation process encounters an error. + A cluster may not be able to serve requests while being created. + RESIZING (int): The cluster is currently being resized, and may revert to its previous + node count if the process encounters an error. + A cluster is still capable of serving requests while being resized, + but may exhibit performance as if its number of allocated nodes is + between the starting and requested states. + DISABLED (int): The cluster has no backing nodes. The data (tables) still + exist, but no operations can be performed on the cluster. + """ + + STATE_NOT_KNOWN = 0 + READY = 1 + CREATING = 2 + RESIZING = 3 + DISABLED = 4 + + +class Instance(object): + class State(enum.IntEnum): + """ + Possible states of an instance. + + Attributes: + STATE_NOT_KNOWN (int): The state of the instance could not be determined. + READY (int): The instance has been successfully created and can serve requests + to its tables. + CREATING (int): The instance is currently being created, and may be destroyed + if the creation process encounters an error. + """ + + STATE_NOT_KNOWN = 0 + READY = 1 + CREATING = 2 + + class Type(enum.IntEnum): + """ + The type of the instance. + + Attributes: + TYPE_UNSPECIFIED (int): The type of the instance is unspecified. If set when creating an + instance, a ``PRODUCTION`` instance will be created. If set when + updating an instance, the type will be left unchanged. + PRODUCTION (int): An instance meant for production use. ``serve_nodes`` must be set on + the cluster. 
+ DEVELOPMENT (int): The instance is meant for development and testing purposes only; it + has no performance or uptime guarantees and is not covered by SLA. After + a development instance is created, it can be upgraded by updating the + instance to type ``PRODUCTION``. An instance created as a production + instance cannot be changed to a development instance. When creating a + development instance, ``serve_nodes`` on the cluster must not be set. + """ + + TYPE_UNSPECIFIED = 0 + PRODUCTION = 1 + DEVELOPMENT = 2 + + +class Snapshot(object): + class State(enum.IntEnum): + """ + Possible states of a snapshot. + + Attributes: + STATE_NOT_KNOWN (int): The state of the snapshot could not be determined. + READY (int): The snapshot has been successfully created and can serve all requests. + CREATING (int): The snapshot is currently being created, and may be destroyed if the + creation process encounters an error. A snapshot may not be restored to a + table while it is being created. + """ + + STATE_NOT_KNOWN = 0 + READY = 1 + CREATING = 2 + + +class Table(object): + class TimestampGranularity(enum.IntEnum): + """ + Possible timestamp granularities to use when keeping multiple versions + of data in a table. + + Attributes: + TIMESTAMP_GRANULARITY_UNSPECIFIED (int): The user did not specify a granularity. Should not be returned. + When specified during table creation, MILLIS will be used. + MILLIS (int): The table keeps data versioned at a granularity of 1ms. + """ + + TIMESTAMP_GRANULARITY_UNSPECIFIED = 0 + MILLIS = 1 + + class View(enum.IntEnum): + """ + Defines a view over a table's fields. + + Attributes: + VIEW_UNSPECIFIED (int): Uses the default view for each method as documented in its request. + NAME_ONLY (int): Only populates ``name``. + SCHEMA_VIEW (int): Only populates ``name`` and fields related to the table's schema. + REPLICATION_VIEW (int): Only populates ``name`` and fields related to the table's + replication state. + FULL (int): Populates all fields. 
+ """ + + VIEW_UNSPECIFIED = 0 + NAME_ONLY = 1 + SCHEMA_VIEW = 2 + REPLICATION_VIEW = 3 + FULL = 4 + + class ClusterState(object): + class ReplicationState(enum.IntEnum): + """ + Table replication states. + + Attributes: + STATE_NOT_KNOWN (int): The replication state of the table is unknown in this cluster. + INITIALIZING (int): The cluster was recently created, and the table must finish copying + over pre-existing data from other clusters before it can begin + receiving live replication updates and serving Data API requests. + PLANNED_MAINTENANCE (int): The table is temporarily unable to serve Data API requests from this + cluster due to planned internal maintenance. + UNPLANNED_MAINTENANCE (int): The table is temporarily unable to serve Data API requests from this + cluster due to unplanned or emergency maintenance. + READY (int): The table can serve Data API requests from this cluster. Depending on + replication delay, reads may not immediately reflect the state of the + table in other clusters. + READY_OPTIMIZING (int): The table is fully created and ready for use after a restore, and is + being optimized for performance. When optimizations are complete, the + table will transition to ``READY`` state. 
+ """ + + STATE_NOT_KNOWN = 0 + INITIALIZING = 1 + PLANNED_MAINTENANCE = 2 + UNPLANNED_MAINTENANCE = 3 + READY = 4 + READY_OPTIMIZING = 5 diff --git a/google/cloud/bigtable_admin_v2/gapic/transports/__init__.py b/google/cloud/bigtable_admin_v2/gapic/transports/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py b/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py new file mode 100644 index 000000000..536629604 --- /dev/null +++ b/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py @@ -0,0 +1,380 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import google.api_core.grpc_helpers +import google.api_core.operations_v1 + +from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2_grpc + + +class BigtableInstanceAdminGrpcTransport(object): + """gRPC transport class providing stubs for + google.bigtable.admin.v2 BigtableInstanceAdmin API. + + The transport provides access to the raw gRPC stubs, + which can be used to take advantage of advanced + features of gRPC. + """ + + # The scopes needed to make gRPC calls to all of the methods defined + # in this service. 
+ _OAUTH_SCOPES = ( + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.cluster", + "https://www.googleapis.com/auth/bigtable.admin.instance", + "https://www.googleapis.com/auth/bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", + "https://www.googleapis.com/auth/cloud-bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ) + + def __init__( + self, channel=None, credentials=None, address="bigtableadmin.googleapis.com:443" + ): + """Instantiate the transport class. + + Args: + channel (grpc.Channel): A ``Channel`` instance through + which to make calls. This argument is mutually exclusive + with ``credentials``; providing both will raise an exception. + credentials (google.auth.credentials.Credentials): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If none + are specified, the client will attempt to ascertain the + credentials from the environment. + address (str): The address where the service is hosted. + """ + # If both `channel` and `credentials` are specified, raise an + # exception (channels come with credentials baked in already). + if channel is not None and credentials is not None: + raise ValueError( + "The `channel` and `credentials` arguments are mutually " "exclusive.", + ) + + # Create the channel. + if channel is None: + channel = self.create_channel( + address=address, + credentials=credentials, + options={ + "grpc.max_send_message_length": -1, + "grpc.max_receive_message_length": -1, + }.items(), + ) + + self._channel = channel + + # gRPC uses objects called "stubs" that are bound to the + # channel and provide a basic method for each RPC. 
+ self._stubs = { + "bigtable_instance_admin_stub": bigtable_instance_admin_pb2_grpc.BigtableInstanceAdminStub( + channel + ), + } + + # Because this API includes a method that returns a + # long-running operation (proto: google.longrunning.Operation), + # instantiate an LRO client. + self._operations_client = google.api_core.operations_v1.OperationsClient( + channel + ) + + @classmethod + def create_channel( + cls, address="bigtableadmin.googleapis.com:443", credentials=None, **kwargs + ): + """Create and return a gRPC channel object. + + Args: + address (str): The host for the channel to use. + credentials (~.Credentials): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + kwargs (dict): Keyword arguments, which are passed to the + channel creation. + + Returns: + grpc.Channel: A gRPC channel object. + """ + return google.api_core.grpc_helpers.create_channel( + address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs + ) + + @property + def channel(self): + """The gRPC channel used by the transport. + + Returns: + grpc.Channel: A gRPC channel object. + """ + return self._channel + + @property + def create_instance(self): + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.create_instance`. + + Create an instance within a project. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_instance_admin_stub"].CreateInstance + + @property + def get_instance(self): + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.get_instance`. + + Gets information about an instance. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. 
+ """ + return self._stubs["bigtable_instance_admin_stub"].GetInstance + + @property + def list_instances(self): + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.list_instances`. + + Lists information about instances in a project. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_instance_admin_stub"].ListInstances + + @property + def update_instance(self): + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.update_instance`. + + Updates an instance within a project. This method updates only the display + name and type for an Instance. To update other Instance properties, such as + labels, use PartialUpdateInstance. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_instance_admin_stub"].UpdateInstance + + @property + def partial_update_instance(self): + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.partial_update_instance`. + + Partially updates an instance within a project. This method can modify all + fields of an Instance and is the preferred way to update an Instance. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_instance_admin_stub"].PartialUpdateInstance + + @property + def delete_instance(self): + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.delete_instance`. + + Delete an instance from a project. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. 
+ """ + return self._stubs["bigtable_instance_admin_stub"].DeleteInstance + + @property + def create_cluster(self): + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.create_cluster`. + + Creates a cluster within an instance. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_instance_admin_stub"].CreateCluster + + @property + def get_cluster(self): + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.get_cluster`. + + Gets information about a cluster. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_instance_admin_stub"].GetCluster + + @property + def list_clusters(self): + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.list_clusters`. + + Lists information about clusters in an instance. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_instance_admin_stub"].ListClusters + + @property + def update_cluster(self): + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.update_cluster`. + + Updates a cluster within an instance. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_instance_admin_stub"].UpdateCluster + + @property + def delete_cluster(self): + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.delete_cluster`. + + Deletes a cluster from an instance. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. 
+ """ + return self._stubs["bigtable_instance_admin_stub"].DeleteCluster + + @property + def create_app_profile(self): + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.create_app_profile`. + + Creates an app profile within an instance. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_instance_admin_stub"].CreateAppProfile + + @property + def get_app_profile(self): + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.get_app_profile`. + + Gets information about an app profile. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_instance_admin_stub"].GetAppProfile + + @property + def list_app_profiles(self): + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.list_app_profiles`. + + Lists information about app profiles in an instance. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_instance_admin_stub"].ListAppProfiles + + @property + def update_app_profile(self): + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.update_app_profile`. + + Updates an app profile within an instance. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_instance_admin_stub"].UpdateAppProfile + + @property + def delete_app_profile(self): + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.delete_app_profile`. + + Deletes an app profile from an instance. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. 
+ """ + return self._stubs["bigtable_instance_admin_stub"].DeleteAppProfile + + @property + def get_iam_policy(self): + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.get_iam_policy`. + + Gets the access control policy for an instance resource. Returns an empty + policy if an instance exists but does not have a policy set. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_instance_admin_stub"].GetIamPolicy + + @property + def set_iam_policy(self): + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.set_iam_policy`. + + Sets the access control policy on an instance resource. Replaces any + existing policy. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_instance_admin_stub"].SetIamPolicy + + @property + def test_iam_permissions(self): + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.test_iam_permissions`. + + Returns permissions that the caller has on the specified instance resource. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_instance_admin_stub"].TestIamPermissions diff --git a/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py b/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py new file mode 100644 index 000000000..281bad20a --- /dev/null +++ b/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py @@ -0,0 +1,471 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import google.api_core.grpc_helpers +import google.api_core.operations_v1 + +from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2_grpc + + +class BigtableTableAdminGrpcTransport(object): + """gRPC transport class providing stubs for + google.bigtable.admin.v2 BigtableTableAdmin API. + + The transport provides access to the raw gRPC stubs, + which can be used to take advantage of advanced + features of gRPC. + """ + + # The scopes needed to make gRPC calls to all of the methods defined + # in this service. + _OAUTH_SCOPES = ( + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.cluster", + "https://www.googleapis.com/auth/bigtable.admin.instance", + "https://www.googleapis.com/auth/bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", + "https://www.googleapis.com/auth/cloud-bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ) + + def __init__( + self, channel=None, credentials=None, address="bigtableadmin.googleapis.com:443" + ): + """Instantiate the transport class. + + Args: + channel (grpc.Channel): A ``Channel`` instance through + which to make calls. This argument is mutually exclusive + with ``credentials``; providing both will raise an exception. + credentials (google.auth.credentials.Credentials): The + authorization credentials to attach to requests. 
These + credentials identify this application to the service. If none + are specified, the client will attempt to ascertain the + credentials from the environment. + address (str): The address where the service is hosted. + """ + # If both `channel` and `credentials` are specified, raise an + # exception (channels come with credentials baked in already). + if channel is not None and credentials is not None: + raise ValueError( + "The `channel` and `credentials` arguments are mutually " "exclusive.", + ) + + # Create the channel. + if channel is None: + channel = self.create_channel( + address=address, + credentials=credentials, + options={ + "grpc.max_send_message_length": -1, + "grpc.max_receive_message_length": -1, + }.items(), + ) + + self._channel = channel + + # gRPC uses objects called "stubs" that are bound to the + # channel and provide a basic method for each RPC. + self._stubs = { + "bigtable_table_admin_stub": bigtable_table_admin_pb2_grpc.BigtableTableAdminStub( + channel + ), + } + + # Because this API includes a method that returns a + # long-running operation (proto: google.longrunning.Operation), + # instantiate an LRO client. + self._operations_client = google.api_core.operations_v1.OperationsClient( + channel + ) + + @classmethod + def create_channel( + cls, address="bigtableadmin.googleapis.com:443", credentials=None, **kwargs + ): + """Create and return a gRPC channel object. + + Args: + address (str): The host for the channel to use. + credentials (~.Credentials): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + kwargs (dict): Keyword arguments, which are passed to the + channel creation. + + Returns: + grpc.Channel: A gRPC channel object. 
+ """ + return google.api_core.grpc_helpers.create_channel( + address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs + ) + + @property + def channel(self): + """The gRPC channel used by the transport. + + Returns: + grpc.Channel: A gRPC channel object. + """ + return self._channel + + @property + def create_table(self): + """Return the gRPC stub for :meth:`BigtableTableAdminClient.create_table`. + + Creates a new table in the specified instance. + The table can be created with a full set of initial column families, + specified in the request. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_table_admin_stub"].CreateTable + + @property + def create_table_from_snapshot(self): + """Return the gRPC stub for :meth:`BigtableTableAdminClient.create_table_from_snapshot`. + + Creates a new table from the specified snapshot. The target table must + not exist. The snapshot and the table must be in the same instance. + + Note: This is a private alpha release of Cloud Bigtable snapshots. This + feature is not currently available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or deprecation + policy. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_table_admin_stub"].CreateTableFromSnapshot + + @property + def list_tables(self): + """Return the gRPC stub for :meth:`BigtableTableAdminClient.list_tables`. + + Lists all tables served from a specified instance. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. 
+ """ + return self._stubs["bigtable_table_admin_stub"].ListTables + + @property + def get_table(self): + """Return the gRPC stub for :meth:`BigtableTableAdminClient.get_table`. + + Gets metadata information about the specified table. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_table_admin_stub"].GetTable + + @property + def delete_table(self): + """Return the gRPC stub for :meth:`BigtableTableAdminClient.delete_table`. + + Permanently deletes a specified table and all of its data. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_table_admin_stub"].DeleteTable + + @property + def modify_column_families(self): + """Return the gRPC stub for :meth:`BigtableTableAdminClient.modify_column_families`. + + Performs a series of column family modifications on the specified table. + Either all or none of the modifications will occur before this method + returns, but data requests received prior to that point may see a table + where only some modifications have taken effect. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_table_admin_stub"].ModifyColumnFamilies + + @property + def drop_row_range(self): + """Return the gRPC stub for :meth:`BigtableTableAdminClient.drop_row_range`. + + Permanently drop/delete a row range from a specified table. The request can + specify whether to delete all rows in a table, or only those that match a + particular prefix. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. 
+ """ + return self._stubs["bigtable_table_admin_stub"].DropRowRange + + @property + def generate_consistency_token(self): + """Return the gRPC stub for :meth:`BigtableTableAdminClient.generate_consistency_token`. + + Generates a consistency token for a Table, which can be used in + CheckConsistency to check whether mutations to the table that finished + before this call started have been replicated. The tokens will be available + for 90 days. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_table_admin_stub"].GenerateConsistencyToken + + @property + def check_consistency(self): + """Return the gRPC stub for :meth:`BigtableTableAdminClient.check_consistency`. + + Checks replication consistency based on a consistency token, that is, if + replication has caught up based on the conditions specified in the token + and the check request. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_table_admin_stub"].CheckConsistency + + @property + def get_iam_policy(self): + """Return the gRPC stub for :meth:`BigtableTableAdminClient.get_iam_policy`. + + Gets the access control policy for a resource. + Returns an empty policy if the resource exists but does not have a policy + set. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_table_admin_stub"].GetIamPolicy + + @property + def set_iam_policy(self): + """Return the gRPC stub for :meth:`BigtableTableAdminClient.set_iam_policy`. + + Sets the access control policy on a Table or Backup resource. + Replaces any existing policy. 
+ + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_table_admin_stub"].SetIamPolicy + + @property + def test_iam_permissions(self): + """Return the gRPC stub for :meth:`BigtableTableAdminClient.test_iam_permissions`. + + Returns permissions that the caller has on the specified table resource. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_table_admin_stub"].TestIamPermissions + + @property + def snapshot_table(self): + """Return the gRPC stub for :meth:`BigtableTableAdminClient.snapshot_table`. + + Creates a new snapshot in the specified cluster from the specified + source table. The cluster and the table must be in the same instance. + + Note: This is a private alpha release of Cloud Bigtable snapshots. This + feature is not currently available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or deprecation + policy. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_table_admin_stub"].SnapshotTable + + @property + def get_snapshot(self): + """Return the gRPC stub for :meth:`BigtableTableAdminClient.get_snapshot`. + + Gets metadata information about the specified snapshot. + + Note: This is a private alpha release of Cloud Bigtable snapshots. This + feature is not currently available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or deprecation + policy. 
+ + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_table_admin_stub"].GetSnapshot + + @property + def list_snapshots(self): + """Return the gRPC stub for :meth:`BigtableTableAdminClient.list_snapshots`. + + Lists all snapshots associated with the specified cluster. + + Note: This is a private alpha release of Cloud Bigtable snapshots. This + feature is not currently available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or deprecation + policy. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_table_admin_stub"].ListSnapshots + + @property + def delete_snapshot(self): + """Return the gRPC stub for :meth:`BigtableTableAdminClient.delete_snapshot`. + + Permanently deletes the specified snapshot. + + Note: This is a private alpha release of Cloud Bigtable snapshots. This + feature is not currently available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or deprecation + policy. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_table_admin_stub"].DeleteSnapshot + + @property + def create_backup(self): + """Return the gRPC stub for :meth:`BigtableTableAdminClient.create_backup`. + + Starts creating a new Cloud Bigtable Backup. The returned backup + ``long-running operation`` can be used to track creation of the backup. + The ``metadata`` field type is ``CreateBackupMetadata``. The + ``response`` field type is ``Backup``, if successful. 
Cancelling the + returned operation will stop the creation and delete the backup. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_table_admin_stub"].CreateBackup + + @property + def get_backup(self): + """Return the gRPC stub for :meth:`BigtableTableAdminClient.get_backup`. + + Gets metadata on a pending or completed Cloud Bigtable Backup. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_table_admin_stub"].GetBackup + + @property + def list_backups(self): + """Return the gRPC stub for :meth:`BigtableTableAdminClient.list_backups`. + + Lists Cloud Bigtable backups. Returns both completed and pending + backups. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_table_admin_stub"].ListBackups + + @property + def update_backup(self): + """Return the gRPC stub for :meth:`BigtableTableAdminClient.update_backup`. + + Updates a pending or completed Cloud Bigtable Backup. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_table_admin_stub"].UpdateBackup + + @property + def delete_backup(self): + """Return the gRPC stub for :meth:`BigtableTableAdminClient.delete_backup`. + + Deletes a pending or completed Cloud Bigtable backup. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_table_admin_stub"].DeleteBackup + + @property + def restore_table(self): + """Return the gRPC stub for :meth:`BigtableTableAdminClient.restore_table`. 
+ + Create a new table by restoring from a completed backup. The new + table must be in the same instance as the instance containing the + backup. The returned table ``long-running operation`` can be used to + track the progress of the operation, and to cancel it. The ``metadata`` + field type is ``RestoreTableMetadata``. The ``response`` type is + ``Table``, if successful. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_table_admin_stub"].RestoreTable diff --git a/google/cloud/bigtable_admin_v2/proto/__init__.py b/google/cloud/bigtable_admin_v2/proto/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto b/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto index ca3aaed7a..8b19b5582 100644 --- a/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto +++ b/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2019 Google LLC. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,6 +11,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. +// syntax = "proto3"; @@ -564,11 +565,9 @@ message DeleteAppProfileRequest { } ]; - // Required. If true, ignore safety checks when deleting the app profile. - bool ignore_warnings = 2 [(google.api.field_behavior) = REQUIRED]; + // If true, ignore safety checks when deleting the app profile. + bool ignore_warnings = 2; } // The metadata for the Operation returned by UpdateAppProfile. 
-message UpdateAppProfileMetadata { - -} +message UpdateAppProfileMetadata {} diff --git a/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py b/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py new file mode 100644 index 000000000..63590907a --- /dev/null +++ b/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py @@ -0,0 +1,2432 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database + +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 +from google.api import client_pb2 as google_dot_api_dot_client__pb2 +from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 +from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 +from google.cloud.bigtable_admin_v2.proto import ( + instance_pb2 as google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2, +) +from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2 +from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2 +from google.longrunning import ( + operations_pb2 as google_dot_longrunning_dot_operations__pb2, +) +from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 +from google.protobuf import field_mask_pb2 as google_dot_protobuf_dot_field__mask__pb2 +from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name="google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto", + 
package="google.bigtable.admin.v2", + syntax="proto3", + serialized_options=b'\n\034com.google.bigtable.admin.v2B\032BigtableInstanceAdminProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2\352\002"Google::Cloud::Bigtable::Admin::V2', + create_key=_descriptor._internal_create_key, + serialized_pb=b'\nBgoogle/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x33google/cloud/bigtable_admin_v2/proto/instance.proto\x1a\x1egoogle/iam/v1/iam_policy.proto\x1a\x1agoogle/iam/v1/policy.proto\x1a#google/longrunning/operations.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xdb\x02\n\x15\x43reateInstanceRequest\x12\x43\n\x06parent\x18\x01 \x01(\tB3\xe0\x41\x02\xfa\x41-\n+cloudresourcemanager.googleapis.com/Project\x12\x18\n\x0binstance_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x39\n\x08instance\x18\x03 \x01(\x0b\x32".google.bigtable.admin.v2.InstanceB\x03\xe0\x41\x02\x12T\n\x08\x63lusters\x18\x04 \x03(\x0b\x32=.google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntryB\x03\xe0\x41\x02\x1aR\n\rClustersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x30\n\x05value\x18\x02 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster:\x02\x38\x01"L\n\x12GetInstanceRequest\x12\x36\n\x04name\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance"o\n\x14ListInstancesRequest\x12\x43\n\x06parent\x18\x01 \x01(\tB3\xe0\x41\x02\xfa\x41-\n+cloudresourcemanager.googleapis.com/Project\x12\x12\n\npage_token\x18\x02 \x01(\t"\x81\x01\n\x15ListInstancesResponse\x12\x35\n\tinstances\x18\x01 \x03(\x0b\x32".google.bigtable.admin.v2.Instance\x12\x18\n\x10\x66\x61iled_locations\x18\x02 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x03 
\x01(\t"\x8f\x01\n\x1cPartialUpdateInstanceRequest\x12\x39\n\x08instance\x18\x01 \x01(\x0b\x32".google.bigtable.admin.v2.InstanceB\x03\xe0\x41\x02\x12\x34\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02"O\n\x15\x44\x65leteInstanceRequest\x12\x36\n\x04name\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance"\xa2\x01\n\x14\x43reateClusterRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x17\n\ncluster_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x37\n\x07\x63luster\x18\x03 \x01(\x0b\x32!.google.bigtable.admin.v2.ClusterB\x03\xe0\x41\x02"J\n\x11GetClusterRequest\x12\x35\n\x04name\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster"c\n\x13ListClustersRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x12\n\npage_token\x18\x02 \x01(\t"~\n\x14ListClustersResponse\x12\x33\n\x08\x63lusters\x18\x01 \x03(\x0b\x32!.google.bigtable.admin.v2.Cluster\x12\x18\n\x10\x66\x61iled_locations\x18\x02 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t"M\n\x14\x44\x65leteClusterRequest\x12\x35\n\x04name\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster"\xc6\x01\n\x16\x43reateInstanceMetadata\x12I\n\x10original_request\x18\x01 \x01(\x0b\x32/.google.bigtable.admin.v2.CreateInstanceRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xcd\x01\n\x16UpdateInstanceMetadata\x12P\n\x10original_request\x18\x01 \x01(\x0b\x32\x36.google.bigtable.admin.v2.PartialUpdateInstanceRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xc4\x01\n\x15\x43reateClusterMetadata\x12H\n\x10original_request\x18\x01 
\x01(\x0b\x32..google.bigtable.admin.v2.CreateClusterRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xb7\x01\n\x15UpdateClusterMetadata\x12;\n\x10original_request\x18\x01 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xc9\x01\n\x17\x43reateAppProfileRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x1b\n\x0e\x61pp_profile_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12>\n\x0b\x61pp_profile\x18\x03 \x01(\x0b\x32$.google.bigtable.admin.v2.AppProfileB\x03\xe0\x41\x02\x12\x17\n\x0fignore_warnings\x18\x04 \x01(\x08"P\n\x14GetAppProfileRequest\x12\x38\n\x04name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n"bigtable.googleapis.com/AppProfile"y\n\x16ListAppProfilesRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x11\n\tpage_size\x18\x03 \x01(\x05\x12\x12\n\npage_token\x18\x02 \x01(\t"\x88\x01\n\x17ListAppProfilesResponse\x12:\n\x0c\x61pp_profiles\x18\x01 \x03(\x0b\x32$.google.bigtable.admin.v2.AppProfile\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\x12\x18\n\x10\x66\x61iled_locations\x18\x03 \x03(\t"\xa8\x01\n\x17UpdateAppProfileRequest\x12>\n\x0b\x61pp_profile\x18\x01 \x01(\x0b\x32$.google.bigtable.admin.v2.AppProfileB\x03\xe0\x41\x02\x12\x34\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02\x12\x17\n\x0fignore_warnings\x18\x03 \x01(\x08"l\n\x17\x44\x65leteAppProfileRequest\x12\x38\n\x04name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n"bigtable.googleapis.com/AppProfile\x12\x17\n\x0fignore_warnings\x18\x02 
\x01(\x08"\x1a\n\x18UpdateAppProfileMetadata2\x92\x1e\n\x15\x42igtableInstanceAdmin\x12\xda\x01\n\x0e\x43reateInstance\x12/.google.bigtable.admin.v2.CreateInstanceRequest\x1a\x1d.google.longrunning.Operation"x\x82\xd3\xe4\x93\x02&"!/v2/{parent=projects/*}/instances:\x01*\xda\x41$parent,instance_id,instance,clusters\xca\x41"\n\x08Instance\x12\x16\x43reateInstanceMetadata\x12\x91\x01\n\x0bGetInstance\x12,.google.bigtable.admin.v2.GetInstanceRequest\x1a".google.bigtable.admin.v2.Instance"0\x82\xd3\xe4\x93\x02#\x12!/v2/{name=projects/*/instances/*}\xda\x41\x04name\x12\xa4\x01\n\rListInstances\x12..google.bigtable.admin.v2.ListInstancesRequest\x1a/.google.bigtable.admin.v2.ListInstancesResponse"2\x82\xd3\xe4\x93\x02#\x12!/v2/{parent=projects/*}/instances\xda\x41\x06parent\x12\x86\x01\n\x0eUpdateInstance\x12".google.bigtable.admin.v2.Instance\x1a".google.bigtable.admin.v2.Instance",\x82\xd3\xe4\x93\x02&\x1a!/v2/{name=projects/*/instances/*}:\x01*\x12\xe8\x01\n\x15PartialUpdateInstance\x12\x36.google.bigtable.admin.v2.PartialUpdateInstanceRequest\x1a\x1d.google.longrunning.Operation"x\x82\xd3\xe4\x93\x02\x36\x32*/v2/{instance.name=projects/*/instances/*}:\x08instance\xda\x41\x14instance,update_mask\xca\x41"\n\x08Instance\x12\x16UpdateInstanceMetadata\x12\x8b\x01\n\x0e\x44\x65leteInstance\x12/.google.bigtable.admin.v2.DeleteInstanceRequest\x1a\x16.google.protobuf.Empty"0\x82\xd3\xe4\x93\x02#*!/v2/{name=projects/*/instances/*}\xda\x41\x04name\x12\xdc\x01\n\rCreateCluster\x12..google.bigtable.admin.v2.CreateClusterRequest\x1a\x1d.google.longrunning.Operation"|\x82\xd3\xe4\x93\x02\x37",/v2/{parent=projects/*/instances/*}/clusters:\x07\x63luster\xda\x41\x19parent,cluster_id,cluster\xca\x41 
\n\x07\x43luster\x12\x15\x43reateClusterMetadata\x12\x99\x01\n\nGetCluster\x12+.google.bigtable.admin.v2.GetClusterRequest\x1a!.google.bigtable.admin.v2.Cluster";\x82\xd3\xe4\x93\x02.\x12,/v2/{name=projects/*/instances/*/clusters/*}\xda\x41\x04name\x12\xac\x01\n\x0cListClusters\x12-.google.bigtable.admin.v2.ListClustersRequest\x1a..google.bigtable.admin.v2.ListClustersResponse"=\x82\xd3\xe4\x93\x02.\x12,/v2/{parent=projects/*/instances/*}/clusters\xda\x41\x06parent\x12\xad\x01\n\rUpdateCluster\x12!.google.bigtable.admin.v2.Cluster\x1a\x1d.google.longrunning.Operation"Z\x82\xd3\xe4\x93\x02\x31\x1a,/v2/{name=projects/*/instances/*/clusters/*}:\x01*\xca\x41 \n\x07\x43luster\x12\x15UpdateClusterMetadata\x12\x94\x01\n\rDeleteCluster\x12..google.bigtable.admin.v2.DeleteClusterRequest\x1a\x16.google.protobuf.Empty";\x82\xd3\xe4\x93\x02.*,/v2/{name=projects/*/instances/*/clusters/*}\xda\x41\x04name\x12\xd5\x01\n\x10\x43reateAppProfile\x12\x31.google.bigtable.admin.v2.CreateAppProfileRequest\x1a$.google.bigtable.admin.v2.AppProfile"h\x82\xd3\xe4\x93\x02>"//v2/{parent=projects/*/instances/*}/appProfiles:\x0b\x61pp_profile\xda\x41!parent,app_profile_id,app_profile\x12\xa5\x01\n\rGetAppProfile\x12..google.bigtable.admin.v2.GetAppProfileRequest\x1a$.google.bigtable.admin.v2.AppProfile">\x82\xd3\xe4\x93\x02\x31\x12//v2/{name=projects/*/instances/*/appProfiles/*}\xda\x41\x04name\x12\xb8\x01\n\x0fListAppProfiles\x12\x30.google.bigtable.admin.v2.ListAppProfilesRequest\x1a\x31.google.bigtable.admin.v2.ListAppProfilesResponse"@\x82\xd3\xe4\x93\x02\x31\x12//v2/{parent=projects/*/instances/*}/appProfiles\xda\x41\x06parent\x12\xfa\x01\n\x10UpdateAppProfile\x12\x31.google.bigtable.admin.v2.UpdateAppProfileRequest\x1a\x1d.google.longrunning.Operation"\x93\x01\x82\xd3\xe4\x93\x02J2;/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}:\x0b\x61pp_profile\xda\x41\x17\x61pp_profile,update_mask\xca\x41&\n\nAppProfile\x12\x18UpdateAppProfileMetadata\x12\x9d\x01\n\x10\x44\x65leteAppProfile\
x12\x31.google.bigtable.admin.v2.DeleteAppProfileRequest\x1a\x16.google.protobuf.Empty">\x82\xd3\xe4\x93\x02\x31*//v2/{name=projects/*/instances/*/appProfiles/*}\xda\x41\x04name\x12\x93\x01\n\x0cGetIamPolicy\x12".google.iam.v1.GetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"H\x82\xd3\xe4\x93\x02\x37"2/v2/{resource=projects/*/instances/*}:getIamPolicy:\x01*\xda\x41\x08resource\x12\x9a\x01\n\x0cSetIamPolicy\x12".google.iam.v1.SetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"O\x82\xd3\xe4\x93\x02\x37"2/v2/{resource=projects/*/instances/*}:setIamPolicy:\x01*\xda\x41\x0fresource,policy\x12\xc5\x01\n\x12TestIamPermissions\x12(.google.iam.v1.TestIamPermissionsRequest\x1a).google.iam.v1.TestIamPermissionsResponse"Z\x82\xd3\xe4\x93\x02="8/v2/{resource=projects/*/instances/*}:testIamPermissions:\x01*\xda\x41\x14resource,permissions\x1a\x9a\x03\xca\x41\x1c\x62igtableadmin.googleapis.com\xd2\x41\xf7\x02https://www.googleapis.com/auth/bigtable.admin,https://www.googleapis.com/auth/bigtable.admin.cluster,https://www.googleapis.com/auth/bigtable.admin.instance,https://www.googleapis.com/auth/cloud-bigtable.admin,https://www.googleapis.com/auth/cloud-bigtable.admin.cluster,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-onlyB\xe2\x01\n\x1c\x63om.google.bigtable.admin.v2B\x1a\x42igtableInstanceAdminProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2\xea\x02"Google::Cloud::Bigtable::Admin::V2b\x06proto3', + dependencies=[ + google_dot_api_dot_annotations__pb2.DESCRIPTOR, + google_dot_api_dot_client__pb2.DESCRIPTOR, + google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, + google_dot_api_dot_resource__pb2.DESCRIPTOR, + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.DESCRIPTOR, + google_dot_iam_dot_v1_dot_iam__policy__pb2.DESCRIPTOR, + google_dot_iam_dot_v1_dot_policy__pb2.DESCRIPTOR, + 
google_dot_longrunning_dot_operations__pb2.DESCRIPTOR, + google_dot_protobuf_dot_empty__pb2.DESCRIPTOR, + google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR, + google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, + ], +) + + +_CREATEINSTANCEREQUEST_CLUSTERSENTRY = _descriptor.Descriptor( + name="ClustersEntry", + full_name="google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="key", + full_name="google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry.key", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="value", + full_name="google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry.value", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=b"8\001", + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=723, + serialized_end=805, +) + +_CREATEINSTANCEREQUEST = _descriptor.Descriptor( + name="CreateInstanceRequest", + full_name="google.bigtable.admin.v2.CreateInstanceRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="parent", + 
full_name="google.bigtable.admin.v2.CreateInstanceRequest.parent", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002\372A-\n+cloudresourcemanager.googleapis.com/Project", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="instance_id", + full_name="google.bigtable.admin.v2.CreateInstanceRequest.instance_id", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="instance", + full_name="google.bigtable.admin.v2.CreateInstanceRequest.instance", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="clusters", + full_name="google.bigtable.admin.v2.CreateInstanceRequest.clusters", + index=3, + number=4, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[_CREATEINSTANCEREQUEST_CLUSTERSENTRY,], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + 
oneofs=[], + serialized_start=458, + serialized_end=805, +) + + +_GETINSTANCEREQUEST = _descriptor.Descriptor( + name="GetInstanceRequest", + full_name="google.bigtable.admin.v2.GetInstanceRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.GetInstanceRequest.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Instance', + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=807, + serialized_end=883, +) + + +_LISTINSTANCESREQUEST = _descriptor.Descriptor( + name="ListInstancesRequest", + full_name="google.bigtable.admin.v2.ListInstancesRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="parent", + full_name="google.bigtable.admin.v2.ListInstancesRequest.parent", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002\372A-\n+cloudresourcemanager.googleapis.com/Project", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="page_token", + full_name="google.bigtable.admin.v2.ListInstancesRequest.page_token", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + 
default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=885, + serialized_end=996, +) + + +_LISTINSTANCESRESPONSE = _descriptor.Descriptor( + name="ListInstancesResponse", + full_name="google.bigtable.admin.v2.ListInstancesResponse", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="instances", + full_name="google.bigtable.admin.v2.ListInstancesResponse.instances", + index=0, + number=1, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="failed_locations", + full_name="google.bigtable.admin.v2.ListInstancesResponse.failed_locations", + index=1, + number=2, + type=9, + cpp_type=9, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="next_page_token", + full_name="google.bigtable.admin.v2.ListInstancesResponse.next_page_token", + index=2, + number=3, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, 
+ file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=999, + serialized_end=1128, +) + + +_PARTIALUPDATEINSTANCEREQUEST = _descriptor.Descriptor( + name="PartialUpdateInstanceRequest", + full_name="google.bigtable.admin.v2.PartialUpdateInstanceRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="instance", + full_name="google.bigtable.admin.v2.PartialUpdateInstanceRequest.instance", + index=0, + number=1, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="update_mask", + full_name="google.bigtable.admin.v2.PartialUpdateInstanceRequest.update_mask", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1131, + serialized_end=1274, +) + + +_DELETEINSTANCEREQUEST = _descriptor.Descriptor( + name="DeleteInstanceRequest", + full_name="google.bigtable.admin.v2.DeleteInstanceRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + 
name="name", + full_name="google.bigtable.admin.v2.DeleteInstanceRequest.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Instance', + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1276, + serialized_end=1355, +) + + +_CREATECLUSTERREQUEST = _descriptor.Descriptor( + name="CreateClusterRequest", + full_name="google.bigtable.admin.v2.CreateClusterRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="parent", + full_name="google.bigtable.admin.v2.CreateClusterRequest.parent", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Instance', + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="cluster_id", + full_name="google.bigtable.admin.v2.CreateClusterRequest.cluster_id", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="cluster", + 
full_name="google.bigtable.admin.v2.CreateClusterRequest.cluster", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1358, + serialized_end=1520, +) + + +_GETCLUSTERREQUEST = _descriptor.Descriptor( + name="GetClusterRequest", + full_name="google.bigtable.admin.v2.GetClusterRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.GetClusterRequest.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002\372A!\n\037bigtable.googleapis.com/Cluster", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1522, + serialized_end=1596, +) + + +_LISTCLUSTERSREQUEST = _descriptor.Descriptor( + name="ListClustersRequest", + full_name="google.bigtable.admin.v2.ListClustersRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="parent", + full_name="google.bigtable.admin.v2.ListClustersRequest.parent", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + 
has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Instance', + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="page_token", + full_name="google.bigtable.admin.v2.ListClustersRequest.page_token", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1598, + serialized_end=1697, +) + + +_LISTCLUSTERSRESPONSE = _descriptor.Descriptor( + name="ListClustersResponse", + full_name="google.bigtable.admin.v2.ListClustersResponse", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="clusters", + full_name="google.bigtable.admin.v2.ListClustersResponse.clusters", + index=0, + number=1, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="failed_locations", + full_name="google.bigtable.admin.v2.ListClustersResponse.failed_locations", + index=1, + number=2, + type=9, + cpp_type=9, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + 
is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="next_page_token", + full_name="google.bigtable.admin.v2.ListClustersResponse.next_page_token", + index=2, + number=3, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1699, + serialized_end=1825, +) + + +_DELETECLUSTERREQUEST = _descriptor.Descriptor( + name="DeleteClusterRequest", + full_name="google.bigtable.admin.v2.DeleteClusterRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.DeleteClusterRequest.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002\372A!\n\037bigtable.googleapis.com/Cluster", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1827, + serialized_end=1904, +) + + +_CREATEINSTANCEMETADATA = _descriptor.Descriptor( + name="CreateInstanceMetadata", + full_name="google.bigtable.admin.v2.CreateInstanceMetadata", + filename=None, + file=DESCRIPTOR, + containing_type=None, 
+ create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="original_request", + full_name="google.bigtable.admin.v2.CreateInstanceMetadata.original_request", + index=0, + number=1, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="request_time", + full_name="google.bigtable.admin.v2.CreateInstanceMetadata.request_time", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="finish_time", + full_name="google.bigtable.admin.v2.CreateInstanceMetadata.finish_time", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1907, + serialized_end=2105, +) + + +_UPDATEINSTANCEMETADATA = _descriptor.Descriptor( + name="UpdateInstanceMetadata", + full_name="google.bigtable.admin.v2.UpdateInstanceMetadata", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="original_request", + 
full_name="google.bigtable.admin.v2.UpdateInstanceMetadata.original_request", + index=0, + number=1, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="request_time", + full_name="google.bigtable.admin.v2.UpdateInstanceMetadata.request_time", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="finish_time", + full_name="google.bigtable.admin.v2.UpdateInstanceMetadata.finish_time", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=2108, + serialized_end=2313, +) + + +_CREATECLUSTERMETADATA = _descriptor.Descriptor( + name="CreateClusterMetadata", + full_name="google.bigtable.admin.v2.CreateClusterMetadata", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="original_request", + full_name="google.bigtable.admin.v2.CreateClusterMetadata.original_request", + index=0, + number=1, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + 
default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="request_time", + full_name="google.bigtable.admin.v2.CreateClusterMetadata.request_time", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="finish_time", + full_name="google.bigtable.admin.v2.CreateClusterMetadata.finish_time", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=2316, + serialized_end=2512, +) + + +_UPDATECLUSTERMETADATA = _descriptor.Descriptor( + name="UpdateClusterMetadata", + full_name="google.bigtable.admin.v2.UpdateClusterMetadata", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="original_request", + full_name="google.bigtable.admin.v2.UpdateClusterMetadata.original_request", + index=0, + number=1, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + 
create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="request_time", + full_name="google.bigtable.admin.v2.UpdateClusterMetadata.request_time", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="finish_time", + full_name="google.bigtable.admin.v2.UpdateClusterMetadata.finish_time", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=2515, + serialized_end=2698, +) + + +_CREATEAPPPROFILEREQUEST = _descriptor.Descriptor( + name="CreateAppProfileRequest", + full_name="google.bigtable.admin.v2.CreateAppProfileRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="parent", + full_name="google.bigtable.admin.v2.CreateAppProfileRequest.parent", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Instance', + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="app_profile_id", + 
full_name="google.bigtable.admin.v2.CreateAppProfileRequest.app_profile_id", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="app_profile", + full_name="google.bigtable.admin.v2.CreateAppProfileRequest.app_profile", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="ignore_warnings", + full_name="google.bigtable.admin.v2.CreateAppProfileRequest.ignore_warnings", + index=3, + number=4, + type=8, + cpp_type=7, + label=1, + has_default_value=False, + default_value=False, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=2701, + serialized_end=2902, +) + + +_GETAPPPROFILEREQUEST = _descriptor.Descriptor( + name="GetAppProfileRequest", + full_name="google.bigtable.admin.v2.GetAppProfileRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.GetAppProfileRequest.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + 
default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b'\340A\002\372A$\n"bigtable.googleapis.com/AppProfile', + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=2904, + serialized_end=2984, +) + + +_LISTAPPPROFILESREQUEST = _descriptor.Descriptor( + name="ListAppProfilesRequest", + full_name="google.bigtable.admin.v2.ListAppProfilesRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="parent", + full_name="google.bigtable.admin.v2.ListAppProfilesRequest.parent", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Instance', + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="page_size", + full_name="google.bigtable.admin.v2.ListAppProfilesRequest.page_size", + index=1, + number=3, + type=5, + cpp_type=1, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="page_token", + full_name="google.bigtable.admin.v2.ListAppProfilesRequest.page_token", + index=2, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + 
containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=2986, + serialized_end=3107, +) + + +_LISTAPPPROFILESRESPONSE = _descriptor.Descriptor( + name="ListAppProfilesResponse", + full_name="google.bigtable.admin.v2.ListAppProfilesResponse", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="app_profiles", + full_name="google.bigtable.admin.v2.ListAppProfilesResponse.app_profiles", + index=0, + number=1, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="next_page_token", + full_name="google.bigtable.admin.v2.ListAppProfilesResponse.next_page_token", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="failed_locations", + full_name="google.bigtable.admin.v2.ListAppProfilesResponse.failed_locations", + index=2, + number=3, + type=9, + cpp_type=9, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + 
create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=3110, + serialized_end=3246, +) + + +_UPDATEAPPPROFILEREQUEST = _descriptor.Descriptor( + name="UpdateAppProfileRequest", + full_name="google.bigtable.admin.v2.UpdateAppProfileRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="app_profile", + full_name="google.bigtable.admin.v2.UpdateAppProfileRequest.app_profile", + index=0, + number=1, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="update_mask", + full_name="google.bigtable.admin.v2.UpdateAppProfileRequest.update_mask", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="ignore_warnings", + full_name="google.bigtable.admin.v2.UpdateAppProfileRequest.ignore_warnings", + index=2, + number=3, + type=8, + cpp_type=7, + label=1, + has_default_value=False, + default_value=False, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + 
is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=3249, + serialized_end=3417, +) + + +_DELETEAPPPROFILEREQUEST = _descriptor.Descriptor( + name="DeleteAppProfileRequest", + full_name="google.bigtable.admin.v2.DeleteAppProfileRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.DeleteAppProfileRequest.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b'\340A\002\372A$\n"bigtable.googleapis.com/AppProfile', + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="ignore_warnings", + full_name="google.bigtable.admin.v2.DeleteAppProfileRequest.ignore_warnings", + index=1, + number=2, + type=8, + cpp_type=7, + label=1, + has_default_value=False, + default_value=False, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=3419, + serialized_end=3527, +) + + +_UPDATEAPPPROFILEMETADATA = _descriptor.Descriptor( + name="UpdateAppProfileMetadata", + full_name="google.bigtable.admin.v2.UpdateAppProfileMetadata", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + 
serialized_start=3529, + serialized_end=3555, +) + +_CREATEINSTANCEREQUEST_CLUSTERSENTRY.fields_by_name[ + "value" +].message_type = ( + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._CLUSTER +) +_CREATEINSTANCEREQUEST_CLUSTERSENTRY.containing_type = _CREATEINSTANCEREQUEST +_CREATEINSTANCEREQUEST.fields_by_name[ + "instance" +].message_type = ( + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._INSTANCE +) +_CREATEINSTANCEREQUEST.fields_by_name[ + "clusters" +].message_type = _CREATEINSTANCEREQUEST_CLUSTERSENTRY +_LISTINSTANCESRESPONSE.fields_by_name[ + "instances" +].message_type = ( + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._INSTANCE +) +_PARTIALUPDATEINSTANCEREQUEST.fields_by_name[ + "instance" +].message_type = ( + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._INSTANCE +) +_PARTIALUPDATEINSTANCEREQUEST.fields_by_name[ + "update_mask" +].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK +_CREATECLUSTERREQUEST.fields_by_name[ + "cluster" +].message_type = ( + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._CLUSTER +) +_LISTCLUSTERSRESPONSE.fields_by_name[ + "clusters" +].message_type = ( + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._CLUSTER +) +_CREATEINSTANCEMETADATA.fields_by_name[ + "original_request" +].message_type = _CREATEINSTANCEREQUEST +_CREATEINSTANCEMETADATA.fields_by_name[ + "request_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_CREATEINSTANCEMETADATA.fields_by_name[ + "finish_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_UPDATEINSTANCEMETADATA.fields_by_name[ + "original_request" +].message_type = _PARTIALUPDATEINSTANCEREQUEST +_UPDATEINSTANCEMETADATA.fields_by_name[ + "request_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_UPDATEINSTANCEMETADATA.fields_by_name[ + "finish_time" +].message_type = 
google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_CREATECLUSTERMETADATA.fields_by_name[ + "original_request" +].message_type = _CREATECLUSTERREQUEST +_CREATECLUSTERMETADATA.fields_by_name[ + "request_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_CREATECLUSTERMETADATA.fields_by_name[ + "finish_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_UPDATECLUSTERMETADATA.fields_by_name[ + "original_request" +].message_type = ( + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._CLUSTER +) +_UPDATECLUSTERMETADATA.fields_by_name[ + "request_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_UPDATECLUSTERMETADATA.fields_by_name[ + "finish_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_CREATEAPPPROFILEREQUEST.fields_by_name[ + "app_profile" +].message_type = ( + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._APPPROFILE +) +_LISTAPPPROFILESRESPONSE.fields_by_name[ + "app_profiles" +].message_type = ( + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._APPPROFILE +) +_UPDATEAPPPROFILEREQUEST.fields_by_name[ + "app_profile" +].message_type = ( + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._APPPROFILE +) +_UPDATEAPPPROFILEREQUEST.fields_by_name[ + "update_mask" +].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK +DESCRIPTOR.message_types_by_name["CreateInstanceRequest"] = _CREATEINSTANCEREQUEST +DESCRIPTOR.message_types_by_name["GetInstanceRequest"] = _GETINSTANCEREQUEST +DESCRIPTOR.message_types_by_name["ListInstancesRequest"] = _LISTINSTANCESREQUEST +DESCRIPTOR.message_types_by_name["ListInstancesResponse"] = _LISTINSTANCESRESPONSE +DESCRIPTOR.message_types_by_name[ + "PartialUpdateInstanceRequest" +] = _PARTIALUPDATEINSTANCEREQUEST +DESCRIPTOR.message_types_by_name["DeleteInstanceRequest"] = _DELETEINSTANCEREQUEST 
+DESCRIPTOR.message_types_by_name["CreateClusterRequest"] = _CREATECLUSTERREQUEST +DESCRIPTOR.message_types_by_name["GetClusterRequest"] = _GETCLUSTERREQUEST +DESCRIPTOR.message_types_by_name["ListClustersRequest"] = _LISTCLUSTERSREQUEST +DESCRIPTOR.message_types_by_name["ListClustersResponse"] = _LISTCLUSTERSRESPONSE +DESCRIPTOR.message_types_by_name["DeleteClusterRequest"] = _DELETECLUSTERREQUEST +DESCRIPTOR.message_types_by_name["CreateInstanceMetadata"] = _CREATEINSTANCEMETADATA +DESCRIPTOR.message_types_by_name["UpdateInstanceMetadata"] = _UPDATEINSTANCEMETADATA +DESCRIPTOR.message_types_by_name["CreateClusterMetadata"] = _CREATECLUSTERMETADATA +DESCRIPTOR.message_types_by_name["UpdateClusterMetadata"] = _UPDATECLUSTERMETADATA +DESCRIPTOR.message_types_by_name["CreateAppProfileRequest"] = _CREATEAPPPROFILEREQUEST +DESCRIPTOR.message_types_by_name["GetAppProfileRequest"] = _GETAPPPROFILEREQUEST +DESCRIPTOR.message_types_by_name["ListAppProfilesRequest"] = _LISTAPPPROFILESREQUEST +DESCRIPTOR.message_types_by_name["ListAppProfilesResponse"] = _LISTAPPPROFILESRESPONSE +DESCRIPTOR.message_types_by_name["UpdateAppProfileRequest"] = _UPDATEAPPPROFILEREQUEST +DESCRIPTOR.message_types_by_name["DeleteAppProfileRequest"] = _DELETEAPPPROFILEREQUEST +DESCRIPTOR.message_types_by_name["UpdateAppProfileMetadata"] = _UPDATEAPPPROFILEMETADATA +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +CreateInstanceRequest = _reflection.GeneratedProtocolMessageType( + "CreateInstanceRequest", + (_message.Message,), + { + "ClustersEntry": _reflection.GeneratedProtocolMessageType( + "ClustersEntry", + (_message.Message,), + { + "DESCRIPTOR": _CREATEINSTANCEREQUEST_CLUSTERSENTRY, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2" + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry) + }, + ), + "DESCRIPTOR": _CREATEINSTANCEREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", 
+ "__doc__": """Request message for BigtableInstanceAdmin.CreateInstance. + + Attributes: + parent: + Required. The unique name of the project in which to create + the new instance. Values are of the form + ``projects/{project}``. + instance_id: + Required. The ID to be used when referring to the new instance + within its project, e.g., just ``myinstance`` rather than + ``projects/myproject/instances/myinstance``. + instance: + Required. The instance to create. Fields marked ``OutputOnly`` + must be left blank. + clusters: + Required. The clusters to be created within the instance, + mapped by desired cluster ID, e.g., just ``mycluster`` rather + than ``projects/myproject/instances/myinstance/clusters/myclus + ter``. Fields marked ``OutputOnly`` must be left blank. + Currently, at most four clusters can be specified. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateInstanceRequest) + }, +) +_sym_db.RegisterMessage(CreateInstanceRequest) +_sym_db.RegisterMessage(CreateInstanceRequest.ClustersEntry) + +GetInstanceRequest = _reflection.GeneratedProtocolMessageType( + "GetInstanceRequest", + (_message.Message,), + { + "DESCRIPTOR": _GETINSTANCEREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", + "__doc__": """Request message for BigtableInstanceAdmin.GetInstance. + + Attributes: + name: + Required. The unique name of the requested instance. Values + are of the form ``projects/{project}/instances/{instance}``. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetInstanceRequest) + }, +) +_sym_db.RegisterMessage(GetInstanceRequest) + +ListInstancesRequest = _reflection.GeneratedProtocolMessageType( + "ListInstancesRequest", + (_message.Message,), + { + "DESCRIPTOR": _LISTINSTANCESREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", + "__doc__": """Request message for BigtableInstanceAdmin.ListInstances. 
+ + Attributes: + parent: + Required. The unique name of the project for which a list of + instances is requested. Values are of the form + ``projects/{project}``. + page_token: + DEPRECATED: This field is unused and ignored. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListInstancesRequest) + }, +) +_sym_db.RegisterMessage(ListInstancesRequest) + +ListInstancesResponse = _reflection.GeneratedProtocolMessageType( + "ListInstancesResponse", + (_message.Message,), + { + "DESCRIPTOR": _LISTINSTANCESRESPONSE, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", + "__doc__": """Response message for BigtableInstanceAdmin.ListInstances. + + Attributes: + instances: + The list of requested instances. + failed_locations: + Locations from which Instance information could not be + retrieved, due to an outage or some other transient condition. + Instances whose Clusters are all in one of the failed + locations may be missing from ``instances``, and Instances + with at least one Cluster in a failed location may only have + partial information returned. Values are of the form + ``projects//locations/`` + next_page_token: + DEPRECATED: This field is unused and ignored. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListInstancesResponse) + }, +) +_sym_db.RegisterMessage(ListInstancesResponse) + +PartialUpdateInstanceRequest = _reflection.GeneratedProtocolMessageType( + "PartialUpdateInstanceRequest", + (_message.Message,), + { + "DESCRIPTOR": _PARTIALUPDATEINSTANCEREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", + "__doc__": """Request message for BigtableInstanceAdmin.PartialUpdateInstance. + + Attributes: + instance: + Required. The Instance which will (partially) replace the + current value. + update_mask: + Required. The subset of Instance fields which should be + replaced. Must be explicitly set. 
+ """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.PartialUpdateInstanceRequest) + }, +) +_sym_db.RegisterMessage(PartialUpdateInstanceRequest) + +DeleteInstanceRequest = _reflection.GeneratedProtocolMessageType( + "DeleteInstanceRequest", + (_message.Message,), + { + "DESCRIPTOR": _DELETEINSTANCEREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", + "__doc__": """Request message for BigtableInstanceAdmin.DeleteInstance. + + Attributes: + name: + Required. The unique name of the instance to be deleted. + Values are of the form + ``projects/{project}/instances/{instance}``. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteInstanceRequest) + }, +) +_sym_db.RegisterMessage(DeleteInstanceRequest) + +CreateClusterRequest = _reflection.GeneratedProtocolMessageType( + "CreateClusterRequest", + (_message.Message,), + { + "DESCRIPTOR": _CREATECLUSTERREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", + "__doc__": """Request message for BigtableInstanceAdmin.CreateCluster. + + Attributes: + parent: + Required. The unique name of the instance in which to create + the new cluster. Values are of the form + ``projects/{project}/instances/{instance}``. + cluster_id: + Required. The ID to be used when referring to the new cluster + within its instance, e.g., just ``mycluster`` rather than ``pr + ojects/myproject/instances/myinstance/clusters/mycluster``. + cluster: + Required. The cluster to be created. Fields marked + ``OutputOnly`` must be left blank. 
+ """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateClusterRequest) + }, +) +_sym_db.RegisterMessage(CreateClusterRequest) + +GetClusterRequest = _reflection.GeneratedProtocolMessageType( + "GetClusterRequest", + (_message.Message,), + { + "DESCRIPTOR": _GETCLUSTERREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", + "__doc__": """Request message for BigtableInstanceAdmin.GetCluster. + + Attributes: + name: + Required. The unique name of the requested cluster. Values are + of the form ``projects/{project}/instances/{instance}/clusters + /{cluster}``. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetClusterRequest) + }, +) +_sym_db.RegisterMessage(GetClusterRequest) + +ListClustersRequest = _reflection.GeneratedProtocolMessageType( + "ListClustersRequest", + (_message.Message,), + { + "DESCRIPTOR": _LISTCLUSTERSREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", + "__doc__": """Request message for BigtableInstanceAdmin.ListClusters. + + Attributes: + parent: + Required. The unique name of the instance for which a list of + clusters is requested. Values are of the form + ``projects/{project}/instances/{instance}``. Use ``{instance} + = '-'`` to list Clusters for all Instances in a project, e.g., + ``projects/myproject/instances/-``. + page_token: + DEPRECATED: This field is unused and ignored. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListClustersRequest) + }, +) +_sym_db.RegisterMessage(ListClustersRequest) + +ListClustersResponse = _reflection.GeneratedProtocolMessageType( + "ListClustersResponse", + (_message.Message,), + { + "DESCRIPTOR": _LISTCLUSTERSRESPONSE, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", + "__doc__": """Response message for BigtableInstanceAdmin.ListClusters. + + Attributes: + clusters: + The list of requested clusters. 
+ failed_locations: + Locations from which Cluster information could not be + retrieved, due to an outage or some other transient condition. + Clusters from these locations may be missing from + ``clusters``, or may only have partial information returned. + Values are of the form + ``projects//locations/`` + next_page_token: + DEPRECATED: This field is unused and ignored. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListClustersResponse) + }, +) +_sym_db.RegisterMessage(ListClustersResponse) + +DeleteClusterRequest = _reflection.GeneratedProtocolMessageType( + "DeleteClusterRequest", + (_message.Message,), + { + "DESCRIPTOR": _DELETECLUSTERREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", + "__doc__": """Request message for BigtableInstanceAdmin.DeleteCluster. + + Attributes: + name: + Required. The unique name of the cluster to be deleted. Values + are of the form ``projects/{project}/instances/{instance}/clus + ters/{cluster}``. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteClusterRequest) + }, +) +_sym_db.RegisterMessage(DeleteClusterRequest) + +CreateInstanceMetadata = _reflection.GeneratedProtocolMessageType( + "CreateInstanceMetadata", + (_message.Message,), + { + "DESCRIPTOR": _CREATEINSTANCEMETADATA, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", + "__doc__": """The metadata for the Operation returned by CreateInstance. + + Attributes: + original_request: + The request that prompted the initiation of this + CreateInstance operation. + request_time: + The time at which the original request was received. + finish_time: + The time at which the operation failed or was completed + successfully. 
+ """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateInstanceMetadata) + }, +) +_sym_db.RegisterMessage(CreateInstanceMetadata) + +UpdateInstanceMetadata = _reflection.GeneratedProtocolMessageType( + "UpdateInstanceMetadata", + (_message.Message,), + { + "DESCRIPTOR": _UPDATEINSTANCEMETADATA, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", + "__doc__": """The metadata for the Operation returned by UpdateInstance. + + Attributes: + original_request: + The request that prompted the initiation of this + UpdateInstance operation. + request_time: + The time at which the original request was received. + finish_time: + The time at which the operation failed or was completed + successfully. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.UpdateInstanceMetadata) + }, +) +_sym_db.RegisterMessage(UpdateInstanceMetadata) + +CreateClusterMetadata = _reflection.GeneratedProtocolMessageType( + "CreateClusterMetadata", + (_message.Message,), + { + "DESCRIPTOR": _CREATECLUSTERMETADATA, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", + "__doc__": """The metadata for the Operation returned by CreateCluster. + + Attributes: + original_request: + The request that prompted the initiation of this CreateCluster + operation. + request_time: + The time at which the original request was received. + finish_time: + The time at which the operation failed or was completed + successfully. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateClusterMetadata) + }, +) +_sym_db.RegisterMessage(CreateClusterMetadata) + +UpdateClusterMetadata = _reflection.GeneratedProtocolMessageType( + "UpdateClusterMetadata", + (_message.Message,), + { + "DESCRIPTOR": _UPDATECLUSTERMETADATA, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", + "__doc__": """The metadata for the Operation returned by UpdateCluster. 
+ + Attributes: + original_request: + The request that prompted the initiation of this UpdateCluster + operation. + request_time: + The time at which the original request was received. + finish_time: + The time at which the operation failed or was completed + successfully. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.UpdateClusterMetadata) + }, +) +_sym_db.RegisterMessage(UpdateClusterMetadata) + +CreateAppProfileRequest = _reflection.GeneratedProtocolMessageType( + "CreateAppProfileRequest", + (_message.Message,), + { + "DESCRIPTOR": _CREATEAPPPROFILEREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", + "__doc__": """Request message for BigtableInstanceAdmin.CreateAppProfile. + + Attributes: + parent: + Required. The unique name of the instance in which to create + the new app profile. Values are of the form + ``projects/{project}/instances/{instance}``. + app_profile_id: + Required. The ID to be used when referring to the new app + profile within its instance, e.g., just ``myprofile`` rather + than ``projects/myproject/instances/myinstance/appProfiles/myp + rofile``. + app_profile: + Required. The app profile to be created. Fields marked + ``OutputOnly`` will be ignored. + ignore_warnings: + If true, ignore safety checks when creating the app profile. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateAppProfileRequest) + }, +) +_sym_db.RegisterMessage(CreateAppProfileRequest) + +GetAppProfileRequest = _reflection.GeneratedProtocolMessageType( + "GetAppProfileRequest", + (_message.Message,), + { + "DESCRIPTOR": _GETAPPPROFILEREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", + "__doc__": """Request message for BigtableInstanceAdmin.GetAppProfile. + + Attributes: + name: + Required. The unique name of the requested app profile. Values + are of the form ``projects/{project}/instances/{instance}/appP + rofiles/{app_profile}``. 
+ """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetAppProfileRequest) + }, +) +_sym_db.RegisterMessage(GetAppProfileRequest) + +ListAppProfilesRequest = _reflection.GeneratedProtocolMessageType( + "ListAppProfilesRequest", + (_message.Message,), + { + "DESCRIPTOR": _LISTAPPPROFILESREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", + "__doc__": """Request message for BigtableInstanceAdmin.ListAppProfiles. + + Attributes: + parent: + Required. The unique name of the instance for which a list of + app profiles is requested. Values are of the form + ``projects/{project}/instances/{instance}``. Use ``{instance} + = '-'`` to list AppProfiles for all Instances in a project, + e.g., ``projects/myproject/instances/-``. + page_size: + Maximum number of results per page. A page_size of zero lets + the server choose the number of items to return. A page_size + which is strictly positive will return at most that many + items. A negative page_size will cause an error. Following + the first request, subsequent paginated calls are not required + to pass a page_size. If a page_size is set in subsequent + calls, it must match the page_size given in the first request. + page_token: + The value of ``next_page_token`` returned by a previous call. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListAppProfilesRequest) + }, +) +_sym_db.RegisterMessage(ListAppProfilesRequest) + +ListAppProfilesResponse = _reflection.GeneratedProtocolMessageType( + "ListAppProfilesResponse", + (_message.Message,), + { + "DESCRIPTOR": _LISTAPPPROFILESRESPONSE, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", + "__doc__": """Response message for BigtableInstanceAdmin.ListAppProfiles. + + Attributes: + app_profiles: + The list of requested app profiles. + next_page_token: + Set if not all app profiles could be returned in a single + response. 
Pass this value to ``page_token`` in another request + to get the next page of results. + failed_locations: + Locations from which AppProfile information could not be + retrieved, due to an outage or some other transient condition. + AppProfiles from these locations may be missing from + ``app_profiles``. Values are of the form + ``projects//locations/`` + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListAppProfilesResponse) + }, +) +_sym_db.RegisterMessage(ListAppProfilesResponse) + +UpdateAppProfileRequest = _reflection.GeneratedProtocolMessageType( + "UpdateAppProfileRequest", + (_message.Message,), + { + "DESCRIPTOR": _UPDATEAPPPROFILEREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", + "__doc__": """Request message for BigtableInstanceAdmin.UpdateAppProfile. + + Attributes: + app_profile: + Required. The app profile which will (partially) replace the + current value. + update_mask: + Required. The subset of app profile fields which should be + replaced. If unset, all fields will be replaced. + ignore_warnings: + If true, ignore safety checks when updating the app profile. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.UpdateAppProfileRequest) + }, +) +_sym_db.RegisterMessage(UpdateAppProfileRequest) + +DeleteAppProfileRequest = _reflection.GeneratedProtocolMessageType( + "DeleteAppProfileRequest", + (_message.Message,), + { + "DESCRIPTOR": _DELETEAPPPROFILEREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", + "__doc__": """Request message for BigtableInstanceAdmin.DeleteAppProfile. + + Attributes: + name: + Required. The unique name of the app profile to be deleted. + Values are of the form ``projects/{project}/instances/{instanc + e}/appProfiles/{app_profile}``. + ignore_warnings: + If true, ignore safety checks when deleting the app profile. 
+ """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteAppProfileRequest) + }, +) +_sym_db.RegisterMessage(DeleteAppProfileRequest) + +UpdateAppProfileMetadata = _reflection.GeneratedProtocolMessageType( + "UpdateAppProfileMetadata", + (_message.Message,), + { + "DESCRIPTOR": _UPDATEAPPPROFILEMETADATA, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", + "__doc__": """The metadata for the Operation returned by UpdateAppProfile.""", + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.UpdateAppProfileMetadata) + }, +) +_sym_db.RegisterMessage(UpdateAppProfileMetadata) + + +DESCRIPTOR._options = None +_CREATEINSTANCEREQUEST_CLUSTERSENTRY._options = None +_CREATEINSTANCEREQUEST.fields_by_name["parent"]._options = None +_CREATEINSTANCEREQUEST.fields_by_name["instance_id"]._options = None +_CREATEINSTANCEREQUEST.fields_by_name["instance"]._options = None +_CREATEINSTANCEREQUEST.fields_by_name["clusters"]._options = None +_GETINSTANCEREQUEST.fields_by_name["name"]._options = None +_LISTINSTANCESREQUEST.fields_by_name["parent"]._options = None +_PARTIALUPDATEINSTANCEREQUEST.fields_by_name["instance"]._options = None +_PARTIALUPDATEINSTANCEREQUEST.fields_by_name["update_mask"]._options = None +_DELETEINSTANCEREQUEST.fields_by_name["name"]._options = None +_CREATECLUSTERREQUEST.fields_by_name["parent"]._options = None +_CREATECLUSTERREQUEST.fields_by_name["cluster_id"]._options = None +_CREATECLUSTERREQUEST.fields_by_name["cluster"]._options = None +_GETCLUSTERREQUEST.fields_by_name["name"]._options = None +_LISTCLUSTERSREQUEST.fields_by_name["parent"]._options = None +_DELETECLUSTERREQUEST.fields_by_name["name"]._options = None +_CREATEAPPPROFILEREQUEST.fields_by_name["parent"]._options = None +_CREATEAPPPROFILEREQUEST.fields_by_name["app_profile_id"]._options = None +_CREATEAPPPROFILEREQUEST.fields_by_name["app_profile"]._options = None +_GETAPPPROFILEREQUEST.fields_by_name["name"]._options = None 
+_LISTAPPPROFILESREQUEST.fields_by_name["parent"]._options = None +_UPDATEAPPPROFILEREQUEST.fields_by_name["app_profile"]._options = None +_UPDATEAPPPROFILEREQUEST.fields_by_name["update_mask"]._options = None +_DELETEAPPPROFILEREQUEST.fields_by_name["name"]._options = None + +_BIGTABLEINSTANCEADMIN = _descriptor.ServiceDescriptor( + name="BigtableInstanceAdmin", + full_name="google.bigtable.admin.v2.BigtableInstanceAdmin", + file=DESCRIPTOR, + index=0, + serialized_options=b"\312A\034bigtableadmin.googleapis.com\322A\367\002https://www.googleapis.com/auth/bigtable.admin,https://www.googleapis.com/auth/bigtable.admin.cluster,https://www.googleapis.com/auth/bigtable.admin.instance,https://www.googleapis.com/auth/cloud-bigtable.admin,https://www.googleapis.com/auth/cloud-bigtable.admin.cluster,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-only", + create_key=_descriptor._internal_create_key, + serialized_start=3558, + serialized_end=7416, + methods=[ + _descriptor.MethodDescriptor( + name="CreateInstance", + full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.CreateInstance", + index=0, + containing_service=None, + input_type=_CREATEINSTANCEREQUEST, + output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, + serialized_options=b'\202\323\344\223\002&"!/v2/{parent=projects/*}/instances:\001*\332A$parent,instance_id,instance,clusters\312A"\n\010Instance\022\026CreateInstanceMetadata', + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="GetInstance", + full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.GetInstance", + index=1, + containing_service=None, + input_type=_GETINSTANCEREQUEST, + output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._INSTANCE, + serialized_options=b"\202\323\344\223\002#\022!/v2/{name=projects/*/instances/*}\332A\004name", + create_key=_descriptor._internal_create_key, + ), + 
_descriptor.MethodDescriptor( + name="ListInstances", + full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.ListInstances", + index=2, + containing_service=None, + input_type=_LISTINSTANCESREQUEST, + output_type=_LISTINSTANCESRESPONSE, + serialized_options=b"\202\323\344\223\002#\022!/v2/{parent=projects/*}/instances\332A\006parent", + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="UpdateInstance", + full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateInstance", + index=3, + containing_service=None, + input_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._INSTANCE, + output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._INSTANCE, + serialized_options=b"\202\323\344\223\002&\032!/v2/{name=projects/*/instances/*}:\001*", + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="PartialUpdateInstance", + full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.PartialUpdateInstance", + index=4, + containing_service=None, + input_type=_PARTIALUPDATEINSTANCEREQUEST, + output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, + serialized_options=b'\202\323\344\223\00262*/v2/{instance.name=projects/*/instances/*}:\010instance\332A\024instance,update_mask\312A"\n\010Instance\022\026UpdateInstanceMetadata', + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="DeleteInstance", + full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteInstance", + index=5, + containing_service=None, + input_type=_DELETEINSTANCEREQUEST, + output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, + serialized_options=b"\202\323\344\223\002#*!/v2/{name=projects/*/instances/*}\332A\004name", + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="CreateCluster", + full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.CreateCluster", + index=6, + 
containing_service=None, + input_type=_CREATECLUSTERREQUEST, + output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, + serialized_options=b'\202\323\344\223\0027",/v2/{parent=projects/*/instances/*}/clusters:\007cluster\332A\031parent,cluster_id,cluster\312A \n\007Cluster\022\025CreateClusterMetadata', + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="GetCluster", + full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.GetCluster", + index=7, + containing_service=None, + input_type=_GETCLUSTERREQUEST, + output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._CLUSTER, + serialized_options=b"\202\323\344\223\002.\022,/v2/{name=projects/*/instances/*/clusters/*}\332A\004name", + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="ListClusters", + full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.ListClusters", + index=8, + containing_service=None, + input_type=_LISTCLUSTERSREQUEST, + output_type=_LISTCLUSTERSRESPONSE, + serialized_options=b"\202\323\344\223\002.\022,/v2/{parent=projects/*/instances/*}/clusters\332A\006parent", + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="UpdateCluster", + full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateCluster", + index=9, + containing_service=None, + input_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._CLUSTER, + output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, + serialized_options=b"\202\323\344\223\0021\032,/v2/{name=projects/*/instances/*/clusters/*}:\001*\312A \n\007Cluster\022\025UpdateClusterMetadata", + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="DeleteCluster", + full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteCluster", + index=10, + containing_service=None, + input_type=_DELETECLUSTERREQUEST, + 
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, + serialized_options=b"\202\323\344\223\002.*,/v2/{name=projects/*/instances/*/clusters/*}\332A\004name", + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="CreateAppProfile", + full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.CreateAppProfile", + index=11, + containing_service=None, + input_type=_CREATEAPPPROFILEREQUEST, + output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._APPPROFILE, + serialized_options=b'\202\323\344\223\002>"//v2/{parent=projects/*/instances/*}/appProfiles:\013app_profile\332A!parent,app_profile_id,app_profile', + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="GetAppProfile", + full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.GetAppProfile", + index=12, + containing_service=None, + input_type=_GETAPPPROFILEREQUEST, + output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._APPPROFILE, + serialized_options=b"\202\323\344\223\0021\022//v2/{name=projects/*/instances/*/appProfiles/*}\332A\004name", + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="ListAppProfiles", + full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.ListAppProfiles", + index=13, + containing_service=None, + input_type=_LISTAPPPROFILESREQUEST, + output_type=_LISTAPPPROFILESRESPONSE, + serialized_options=b"\202\323\344\223\0021\022//v2/{parent=projects/*/instances/*}/appProfiles\332A\006parent", + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="UpdateAppProfile", + full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateAppProfile", + index=14, + containing_service=None, + input_type=_UPDATEAPPPROFILEREQUEST, + output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, + 
serialized_options=b"\202\323\344\223\002J2;/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}:\013app_profile\332A\027app_profile,update_mask\312A&\n\nAppProfile\022\030UpdateAppProfileMetadata", + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="DeleteAppProfile", + full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteAppProfile", + index=15, + containing_service=None, + input_type=_DELETEAPPPROFILEREQUEST, + output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, + serialized_options=b"\202\323\344\223\0021*//v2/{name=projects/*/instances/*/appProfiles/*}\332A\004name", + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="GetIamPolicy", + full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.GetIamPolicy", + index=16, + containing_service=None, + input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._GETIAMPOLICYREQUEST, + output_type=google_dot_iam_dot_v1_dot_policy__pb2._POLICY, + serialized_options=b'\202\323\344\223\0027"2/v2/{resource=projects/*/instances/*}:getIamPolicy:\001*\332A\010resource', + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="SetIamPolicy", + full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.SetIamPolicy", + index=17, + containing_service=None, + input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._SETIAMPOLICYREQUEST, + output_type=google_dot_iam_dot_v1_dot_policy__pb2._POLICY, + serialized_options=b'\202\323\344\223\0027"2/v2/{resource=projects/*/instances/*}:setIamPolicy:\001*\332A\017resource,policy', + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="TestIamPermissions", + full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.TestIamPermissions", + index=18, + containing_service=None, + input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._TESTIAMPERMISSIONSREQUEST, + 
output_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._TESTIAMPERMISSIONSRESPONSE, + serialized_options=b'\202\323\344\223\002="8/v2/{resource=projects/*/instances/*}:testIamPermissions:\001*\332A\024resource,permissions', + create_key=_descriptor._internal_create_key, + ), + ], +) +_sym_db.RegisterServiceDescriptor(_BIGTABLEINSTANCEADMIN) + +DESCRIPTOR.services_by_name["BigtableInstanceAdmin"] = _BIGTABLEINSTANCEADMIN + +# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py b/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py new file mode 100644 index 000000000..8b1395579 --- /dev/null +++ b/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py @@ -0,0 +1,895 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + +from google.cloud.bigtable_admin_v2.proto import ( + bigtable_instance_admin_pb2 as google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2, +) +from google.cloud.bigtable_admin_v2.proto import ( + instance_pb2 as google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2, +) +from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2 +from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2 +from google.longrunning import ( + operations_pb2 as google_dot_longrunning_dot_operations__pb2, +) +from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 + + +class BigtableInstanceAdminStub(object): + """Service for creating, configuring, and deleting Cloud Bigtable Instances and + Clusters. Provides access to the Instance and Cluster schemas only, not the + tables' metadata or data stored in those tables. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.CreateInstance = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateInstance", + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateInstanceRequest.SerializeToString, + response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, + ) + self.GetInstance = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetInstance", + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetInstanceRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Instance.FromString, + ) + self.ListInstances = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListInstances", + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesResponse.FromString, + ) + self.UpdateInstance = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateInstance", + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Instance.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Instance.FromString, + ) + self.PartialUpdateInstance = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateInstance", + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.PartialUpdateInstanceRequest.SerializeToString, + response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, + ) + self.DeleteInstance = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteInstance", + 
request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteInstanceRequest.SerializeToString, + response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ) + self.CreateCluster = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateCluster", + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateClusterRequest.SerializeToString, + response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, + ) + self.GetCluster = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetCluster", + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetClusterRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Cluster.FromString, + ) + self.ListClusters = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListClusters", + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersResponse.FromString, + ) + self.UpdateCluster = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateCluster", + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Cluster.SerializeToString, + response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, + ) + self.DeleteCluster = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteCluster", + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteClusterRequest.SerializeToString, + 
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ) + self.CreateAppProfile = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateAppProfile", + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateAppProfileRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.AppProfile.FromString, + ) + self.GetAppProfile = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetAppProfile", + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetAppProfileRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.AppProfile.FromString, + ) + self.ListAppProfiles = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListAppProfiles", + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesResponse.FromString, + ) + self.UpdateAppProfile = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateAppProfile", + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.UpdateAppProfileRequest.SerializeToString, + response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, + ) + self.DeleteAppProfile = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteAppProfile", + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteAppProfileRequest.SerializeToString, + response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ) + self.GetIamPolicy = 
class BigtableInstanceAdminServicer(object):
    """Service for creating, configuring, and deleting Cloud Bigtable Instances and
    Clusters. Provides access to the Instance and Cluster schemas only, not the
    tables' metadata or data stored in those tables.

    Subclass and override individual RPC methods to implement the service.
    Every handler below is a stub that reports UNIMPLEMENTED to the caller.
    """

    @staticmethod
    def _unimplemented(context):
        # Shared default behavior: report UNIMPLEMENTED on the RPC context and
        # raise locally so a missing override is also loud on the server side.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def CreateInstance(self, request, context):
        """Create an instance within a project."""
        self._unimplemented(context)

    def GetInstance(self, request, context):
        """Gets information about an instance."""
        self._unimplemented(context)

    def ListInstances(self, request, context):
        """Lists information about instances in a project."""
        self._unimplemented(context)

    def UpdateInstance(self, request, context):
        """Updates an instance within a project. This method updates only the display
        name and type for an Instance. To update other Instance properties, such as
        labels, use PartialUpdateInstance.
        """
        self._unimplemented(context)

    def PartialUpdateInstance(self, request, context):
        """Partially updates an instance within a project. This method can modify all
        fields of an Instance and is the preferred way to update an Instance.
        """
        self._unimplemented(context)

    def DeleteInstance(self, request, context):
        """Delete an instance from a project."""
        self._unimplemented(context)

    def CreateCluster(self, request, context):
        """Creates a cluster within an instance."""
        self._unimplemented(context)

    def GetCluster(self, request, context):
        """Gets information about a cluster."""
        self._unimplemented(context)

    def ListClusters(self, request, context):
        """Lists information about clusters in an instance."""
        self._unimplemented(context)

    def UpdateCluster(self, request, context):
        """Updates a cluster within an instance."""
        self._unimplemented(context)

    def DeleteCluster(self, request, context):
        """Deletes a cluster from an instance."""
        self._unimplemented(context)

    def CreateAppProfile(self, request, context):
        """Creates an app profile within an instance."""
        self._unimplemented(context)

    def GetAppProfile(self, request, context):
        """Gets information about an app profile."""
        self._unimplemented(context)

    def ListAppProfiles(self, request, context):
        """Lists information about app profiles in an instance."""
        self._unimplemented(context)

    def UpdateAppProfile(self, request, context):
        """Updates an app profile within an instance."""
        self._unimplemented(context)

    def DeleteAppProfile(self, request, context):
        """Deletes an app profile from an instance."""
        self._unimplemented(context)

    def GetIamPolicy(self, request, context):
        """Gets the access control policy for an instance resource. Returns an empty
        policy if an instance exists but does not have a policy set.
        """
        self._unimplemented(context)

    def SetIamPolicy(self, request, context):
        """Sets the access control policy on an instance resource. Replaces any
        existing policy.
        """
        self._unimplemented(context)

    def TestIamPermissions(self, request, context):
        """Returns permissions that the caller has on the specified instance resource."""
        self._unimplemented(context)


def add_BigtableInstanceAdminServicer_to_server(servicer, server):
    """Register every BigtableInstanceAdmin RPC handler from ``servicer`` on ``server``."""
    # Short local aliases for the generated protobuf modules imported at file top.
    _admin_pb2 = google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2
    _instance_pb2 = google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2
    _operations_pb2 = google_dot_longrunning_dot_operations__pb2
    _empty_pb2 = google_dot_protobuf_dot_empty__pb2
    _iam_policy_pb2 = google_dot_iam_dot_v1_dot_iam__policy__pb2
    _policy_pb2 = google_dot_iam_dot_v1_dot_policy__pb2
    # (RPC name, request message, response message) for every unary-unary method.
    rpc_table = (
        ("CreateInstance", _admin_pb2.CreateInstanceRequest, _operations_pb2.Operation),
        ("GetInstance", _admin_pb2.GetInstanceRequest, _instance_pb2.Instance),
        ("ListInstances", _admin_pb2.ListInstancesRequest, _admin_pb2.ListInstancesResponse),
        ("UpdateInstance", _instance_pb2.Instance, _instance_pb2.Instance),
        ("PartialUpdateInstance", _admin_pb2.PartialUpdateInstanceRequest, _operations_pb2.Operation),
        ("DeleteInstance", _admin_pb2.DeleteInstanceRequest, _empty_pb2.Empty),
        ("CreateCluster", _admin_pb2.CreateClusterRequest, _operations_pb2.Operation),
        ("GetCluster", _admin_pb2.GetClusterRequest, _instance_pb2.Cluster),
        ("ListClusters", _admin_pb2.ListClustersRequest, _admin_pb2.ListClustersResponse),
        ("UpdateCluster", _instance_pb2.Cluster, _operations_pb2.Operation),
        ("DeleteCluster", _admin_pb2.DeleteClusterRequest, _empty_pb2.Empty),
        ("CreateAppProfile", _admin_pb2.CreateAppProfileRequest, _instance_pb2.AppProfile),
        ("GetAppProfile", _admin_pb2.GetAppProfileRequest, _instance_pb2.AppProfile),
        ("ListAppProfiles", _admin_pb2.ListAppProfilesRequest, _admin_pb2.ListAppProfilesResponse),
        ("UpdateAppProfile", _admin_pb2.UpdateAppProfileRequest, _operations_pb2.Operation),
        ("DeleteAppProfile", _admin_pb2.DeleteAppProfileRequest, _empty_pb2.Empty),
        ("GetIamPolicy", _iam_policy_pb2.GetIamPolicyRequest, _policy_pb2.Policy),
        ("SetIamPolicy", _iam_policy_pb2.SetIamPolicyRequest, _policy_pb2.Policy),
        ("TestIamPermissions", _iam_policy_pb2.TestIamPermissionsRequest, _iam_policy_pb2.TestIamPermissionsResponse),
    )
    rpc_method_handlers = {
        name: grpc.unary_unary_rpc_method_handler(
            getattr(servicer, name),
            request_deserializer=request_type.FromString,
            response_serializer=response_type.SerializeToString,
        )
        for name, request_type, response_type in rpc_table
    }
    generic_handler = grpc.method_handlers_generic_handler(
        "google.bigtable.admin.v2.BigtableInstanceAdmin", rpc_method_handlers
    )
    server.add_generic_rpc_handlers((generic_handler,))


# This class is part of an EXPERIMENTAL API.
class BigtableInstanceAdmin(object):
    """Service for creating, configuring, and deleting Cloud Bigtable Instances and
    Clusters. Provides access to the Instance and Cluster schemas only, not the
    tables' metadata or data stored in those tables.
    """

    # Fully qualified service name shared by every method path below.
    _SERVICE_PREFIX = "/google.bigtable.admin.v2.BigtableInstanceAdmin/"

    @staticmethod
    def _invoke(
        method,
        request_serializer,
        response_deserializer,
        request,
        target,
        options,
        channel_credentials,
        call_credentials,
        compression,
        wait_for_ready,
        timeout,
        metadata,
    ):
        # Single trampoline into grpc's experimental one-shot unary-unary API.
        # Serializer lookups are done by the callers at call time (not at class
        # creation) to match the generated code's lazy name resolution.
        return grpc.experimental.unary_unary(
            request,
            target,
            BigtableInstanceAdmin._SERVICE_PREFIX + method,
            request_serializer,
            response_deserializer,
            options,
            channel_credentials,
            call_credentials,
            compression,
            wait_for_ready,
            timeout,
            metadata,
        )

    @staticmethod
    def CreateInstance(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        return BigtableInstanceAdmin._invoke(
            "CreateInstance",
            google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateInstanceRequest.SerializeToString,
            google_dot_longrunning_dot_operations__pb2.Operation.FromString,
            request, target, options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata,
        )

    @staticmethod
    def GetInstance(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        return BigtableInstanceAdmin._invoke(
            "GetInstance",
            google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetInstanceRequest.SerializeToString,
            google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Instance.FromString,
            request, target, options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata,
        )

    @staticmethod
    def ListInstances(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        return BigtableInstanceAdmin._invoke(
            "ListInstances",
            google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesRequest.SerializeToString,
            google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesResponse.FromString,
            request, target, options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata,
        )

    @staticmethod
    def UpdateInstance(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        return BigtableInstanceAdmin._invoke(
            "UpdateInstance",
            google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Instance.SerializeToString,
            google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Instance.FromString,
            request, target, options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata,
        )

    @staticmethod
    def PartialUpdateInstance(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        return BigtableInstanceAdmin._invoke(
            "PartialUpdateInstance",
            google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.PartialUpdateInstanceRequest.SerializeToString,
            google_dot_longrunning_dot_operations__pb2.Operation.FromString,
            request, target, options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata,
        )

    @staticmethod
    def DeleteInstance(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        return BigtableInstanceAdmin._invoke(
            "DeleteInstance",
            google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteInstanceRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            request, target, options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata,
        )

    @staticmethod
    def CreateCluster(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        return BigtableInstanceAdmin._invoke(
            "CreateCluster",
            google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateClusterRequest.SerializeToString,
            google_dot_longrunning_dot_operations__pb2.Operation.FromString,
            request, target, options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata,
        )

    @staticmethod
    def GetCluster(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        return BigtableInstanceAdmin._invoke(
            "GetCluster",
            google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetClusterRequest.SerializeToString,
            google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Cluster.FromString,
            request, target, options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata,
        )

    @staticmethod
    def ListClusters(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        return BigtableInstanceAdmin._invoke(
            "ListClusters",
            google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersRequest.SerializeToString,
            google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersResponse.FromString,
            request, target, options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata,
        )

    @staticmethod
    def UpdateCluster(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        return BigtableInstanceAdmin._invoke(
            "UpdateCluster",
            google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Cluster.SerializeToString,
            google_dot_longrunning_dot_operations__pb2.Operation.FromString,
            request, target, options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata,
        )

    @staticmethod
    def DeleteCluster(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        return BigtableInstanceAdmin._invoke(
            "DeleteCluster",
            google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteClusterRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            request, target, options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata,
        )

    @staticmethod
    def CreateAppProfile(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        return BigtableInstanceAdmin._invoke(
            "CreateAppProfile",
            google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateAppProfileRequest.SerializeToString,
            google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.AppProfile.FromString,
            request, target, options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata,
        )

    @staticmethod
    def GetAppProfile(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        return BigtableInstanceAdmin._invoke(
            "GetAppProfile",
            google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetAppProfileRequest.SerializeToString,
            google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.AppProfile.FromString,
            request, target, options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata,
        )

    @staticmethod
    def ListAppProfiles(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        return BigtableInstanceAdmin._invoke(
            "ListAppProfiles",
            google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesRequest.SerializeToString,
            google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesResponse.FromString,
            request, target, options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata,
        )

    @staticmethod
    def UpdateAppProfile(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        return BigtableInstanceAdmin._invoke(
            "UpdateAppProfile",
            google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.UpdateAppProfileRequest.SerializeToString,
            google_dot_longrunning_dot_operations__pb2.Operation.FromString,
            request, target, options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata,
        )

    @staticmethod
    def DeleteAppProfile(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        return BigtableInstanceAdmin._invoke(
            "DeleteAppProfile",
            google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteAppProfileRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            request, target, options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata,
        )

    @staticmethod
    def GetIamPolicy(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        return BigtableInstanceAdmin._invoke(
            "GetIamPolicy",
            google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.SerializeToString,
            google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString,
            request, target, options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata,
        )

    @staticmethod
    def SetIamPolicy(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        return BigtableInstanceAdmin._invoke(
            "SetIamPolicy",
            google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.SerializeToString,
            google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString,
            request, target, options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata,
        )

    @staticmethod
    def TestIamPermissions(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        return BigtableInstanceAdmin._invoke(
            "TestIamPermissions",
            google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.SerializeToString,
            google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString,
            request, target, options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata,
        )
The tokens will be available // for 90 days. - rpc GenerateConsistencyToken(GenerateConsistencyTokenRequest) returns (GenerateConsistencyTokenResponse) { + rpc GenerateConsistencyToken(GenerateConsistencyTokenRequest) + returns (GenerateConsistencyTokenResponse) { option (google.api.http) = { post: "/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken" body: "*" @@ -145,7 +147,8 @@ service BigtableTableAdmin { // Checks replication consistency based on a consistency token, that is, if // replication has caught up based on the conditions specified in the token // and the check request. - rpc CheckConsistency(CheckConsistencyRequest) returns (CheckConsistencyResponse) { + rpc CheckConsistency(CheckConsistencyRequest) + returns (CheckConsistencyResponse) { option (google.api.http) = { post: "/v2/{name=projects/*/instances/*/tables/*}:checkConsistency" body: "*" @@ -161,12 +164,14 @@ service BigtableTableAdmin { // feature might be changed in backward-incompatible ways and is not // recommended for production use. It is not subject to any SLA or deprecation // policy. - rpc SnapshotTable(SnapshotTableRequest) returns (google.longrunning.Operation) { + rpc SnapshotTable(SnapshotTableRequest) + returns (google.longrunning.Operation) { option (google.api.http) = { post: "/v2/{name=projects/*/instances/*/tables/*}:snapshot" body: "*" }; - option (google.api.method_signature) = "name,cluster,snapshot_id,description"; + option (google.api.method_signature) = + "name,cluster,snapshot_id,description"; option (google.longrunning.operation_info) = { response_type: "Snapshot" metadata_type: "SnapshotTableMetadata" @@ -215,24 +220,24 @@ service BigtableTableAdmin { option (google.api.method_signature) = "name"; } - // Starts creating a new Cloud Bigtable Backup. The returned backup + // Starts creating a new Cloud Bigtable Backup. The returned backup // [long-running operation][google.longrunning.Operation] can be used to // track creation of the backup. 
The // [metadata][google.longrunning.Operation.metadata] field type is // [CreateBackupMetadata][google.bigtable.admin.v2.CreateBackupMetadata]. The // [response][google.longrunning.Operation.response] field type is - // [Backup][google.bigtable.admin.v2.Backup], if successful. Cancelling the returned operation will stop the - // creation and delete the backup. + // [Backup][google.bigtable.admin.v2.Backup], if successful. Cancelling the + // returned operation will stop the creation and delete the backup. rpc CreateBackup(CreateBackupRequest) returns (google.longrunning.Operation) { option (google.api.http) = { post: "/v2/{parent=projects/*/instances/*/clusters/*}/backups" body: "backup" }; - option (google.api.method_signature) = "parent,backup_id,backup"; option (google.longrunning.operation_info) = { response_type: "Backup" metadata_type: "CreateBackupMetadata" }; + option (google.api.method_signature) = "parent,backup_id,backup"; } // Gets metadata on a pending or completed Cloud Bigtable Backup. @@ -270,11 +275,11 @@ service BigtableTableAdmin { } // Create a new table by restoring from a completed backup. The new table - // must be in the same instance as the instance containing the backup. The + // must be in the same instance as the instance containing the backup. The // returned table [long-running operation][google.longrunning.Operation] can - // be used to track the progress of the operation, and to cancel it. The + // be used to track the progress of the operation, and to cancel it. The // [metadata][google.longrunning.Operation.metadata] field type is - // [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. The + // [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. The // [response][google.longrunning.Operation.response] type is // [Table][google.bigtable.admin.v2.Table], if successful. 
rpc RestoreTable(RestoreTableRequest) returns (google.longrunning.Operation) { @@ -288,24 +293,22 @@ service BigtableTableAdmin { }; } - // Gets the access control policy for a Table or Backup resource. + // Gets the access control policy for a resource. // Returns an empty policy if the resource exists but does not have a policy // set. - rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest) returns (google.iam.v1.Policy) { + rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest) + returns (google.iam.v1.Policy) { option (google.api.http) = { post: "/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy" body: "*" - additional_bindings { - post: "/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:getIamPolicy" - body: "*" - } }; option (google.api.method_signature) = "resource"; } // Sets the access control policy on a Table or Backup resource. // Replaces any existing policy. - rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest) returns (google.iam.v1.Policy) { + rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest) + returns (google.iam.v1.Policy) { option (google.api.http) = { post: "/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy" body: "*" @@ -317,8 +320,9 @@ service BigtableTableAdmin { option (google.api.method_signature) = "resource,policy"; } - // Returns permissions that the caller has on the specified Table or Backup resource. - rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest) returns (google.iam.v1.TestIamPermissionsResponse) { + // Returns permissions that the caller has on the specified table resource. + rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest) + returns (google.iam.v1.TestIamPermissionsResponse) { option (google.api.http) = { post: "/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions" body: "*" @@ -331,78 +335,6 @@ service BigtableTableAdmin { } } -// The request for -// [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. 
-message RestoreTableRequest { - // Required. The name of the instance in which to create the restored - // table. This instance must be the parent of the source backup. Values are - // of the form `projects//instances/`. - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Instance" - } - ]; - - // Required. The id of the table to create and restore to. This - // table must not already exist. The `table_id` appended to - // `parent` forms the full table name of the form - // `projects//instances//tables/`. - string table_id = 2 [(google.api.field_behavior) = REQUIRED]; - - // Required. The source from which to restore. - oneof source { - // Name of the backup from which to restore. Values are of the form - // `projects//instances//clusters//backups/`. - string backup = 3 [(google.api.resource_reference) = { - type: "bigtable.googleapis.com/Backup" - }]; - } -} - -// Metadata type for the long-running operation returned by -// [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. -message RestoreTableMetadata { - // Name of the table being created and restored to. - string name = 1; - - // The type of the restore source. - RestoreSourceType source_type = 2; - - // Information about the source used to restore the table, as specified by - // `source` in [RestoreTableRequest][google.bigtable.admin.v2.RestoreTableRequest]. - oneof source_info { - BackupInfo backup_info = 3; - } - - // If exists, the name of the long-running operation that will be used to - // track the post-restore optimization process to optimize the performance of - // the restored table. The metadata type of the long-running operation is - // [OptimizeRestoreTableMetadata][]. The response type is - // [Empty][google.protobuf.Empty]. This long-running operation may be - // automatically created by the system if applicable after the - // RestoreTable long-running operation completes successfully. 
This operation - // may not be created if the table is already optimized or the restore was - // not successful. - string optimize_table_operation_name = 4; - - // The progress of the [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable] - // operation. - OperationProgress progress = 5; -} - -// Metadata type for the long-running operation used to track the progress -// of optimizations performed on a newly restored table. This long-running -// operation is automatically created by the system after the successful -// completion of a table restore, and cannot be cancelled. -message OptimizeRestoredTableMetadata { - // Name of the restored table being optimized. - string name = 1; - - // The progress of the post-restore optimizations. - OperationProgress progress = 2; -} - // Request message for // [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] message CreateTableRequest { @@ -421,8 +353,8 @@ message CreateTableRequest { } ]; - // Required. The name by which the new table should be referred to within the parent - // instance, e.g., `foobar` rather than `{parent}/tables/foobar`. + // Required. The name by which the new table should be referred to within the + // parent instance, e.g., `foobar` rather than `{parent}/tables/foobar`. // Maximum 50 characters. string table_id = 2 [(google.api.field_behavior) = REQUIRED]; @@ -465,13 +397,13 @@ message CreateTableFromSnapshotRequest { } ]; - // Required. The name by which the new table should be referred to within the parent - // instance, e.g., `foobar` rather than `{parent}/tables/foobar`. + // Required. The name by which the new table should be referred to within the + // parent instance, e.g., `foobar` rather than `{parent}/tables/foobar`. string table_id = 2 [(google.api.field_behavior) = REQUIRED]; - // Required. The unique name of the snapshot from which to restore the table. 
The - // snapshot and the table must be in the same instance. - // Values are of the form + // Required. The unique name of the snapshot from which to restore the table. + // The snapshot and the table must be in the same instance. Values are of the + // form // `projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}`. string source_snapshot = 3 [ (google.api.field_behavior) = REQUIRED, @@ -489,9 +421,7 @@ message DropRowRangeRequest { // `projects/{project}/instances/{instance}/tables/{table}`. string name = 1 [ (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Table" - } + (google.api.resource_reference) = { type: "bigtable.googleapis.com/Table" } ]; // Delete all rows or by prefix. @@ -508,8 +438,8 @@ message DropRowRangeRequest { // Request message for // [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] message ListTablesRequest { - // Required. The unique name of the instance for which tables should be listed. - // Values are of the form `projects/{project}/instances/{instance}`. + // Required. The unique name of the instance for which tables should be + // listed. Values are of the form `projects/{project}/instances/{instance}`. string parent = 1 [ (google.api.field_behavior) = REQUIRED, (google.api.resource_reference) = { @@ -556,9 +486,7 @@ message GetTableRequest { // `projects/{project}/instances/{instance}/tables/{table}`. string name = 1 [ (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Table" - } + (google.api.resource_reference) = { type: "bigtable.googleapis.com/Table" } ]; // The view to be applied to the returned table's fields. @@ -574,9 +502,7 @@ message DeleteTableRequest { // `projects/{project}/instances/{instance}/tables/{table}`. 
string name = 1 [ (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Table" - } + (google.api.resource_reference) = { type: "bigtable.googleapis.com/Table" } ]; } @@ -609,29 +535,26 @@ message ModifyColumnFamiliesRequest { // `projects/{project}/instances/{instance}/tables/{table}`. string name = 1 [ (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Table" - } + (google.api.resource_reference) = { type: "bigtable.googleapis.com/Table" } ]; - // Required. Modifications to be atomically applied to the specified table's families. - // Entries are applied in order, meaning that earlier modifications can be - // masked by later ones (in the case of repeated updates to the same family, - // for example). - repeated Modification modifications = 2 [(google.api.field_behavior) = REQUIRED]; + // Required. Modifications to be atomically applied to the specified table's + // families. Entries are applied in order, meaning that earlier modifications + // can be masked by later ones (in the case of repeated updates to the same + // family, for example). + repeated Modification modifications = 2 + [(google.api.field_behavior) = REQUIRED]; } // Request message for // [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] message GenerateConsistencyTokenRequest { - // Required. The unique name of the Table for which to create a consistency token. - // Values are of the form + // Required. The unique name of the Table for which to create a consistency + // token. Values are of the form // `projects/{project}/instances/{instance}/tables/{table}`. 
string name = 1 [ (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Table" - } + (google.api.resource_reference) = { type: "bigtable.googleapis.com/Table" } ]; } @@ -645,14 +568,12 @@ message GenerateConsistencyTokenResponse { // Request message for // [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] message CheckConsistencyRequest { - // Required. The unique name of the Table for which to check replication consistency. - // Values are of the form + // Required. The unique name of the Table for which to check replication + // consistency. Values are of the form // `projects/{project}/instances/{instance}/tables/{table}`. string name = 1 [ (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Table" - } + (google.api.resource_reference) = { type: "bigtable.googleapis.com/Table" } ]; // Required. The token created using GenerateConsistencyToken for the Table. @@ -680,9 +601,7 @@ message SnapshotTableRequest { // `projects/{project}/instances/{instance}/tables/{table}`. string name = 1 [ (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Table" - } + (google.api.resource_reference) = { type: "bigtable.googleapis.com/Table" } ]; // Required. The name of the cluster where the snapshot will be created in. @@ -695,9 +614,9 @@ message SnapshotTableRequest { } ]; - // Required. The ID by which the new snapshot should be referred to within the parent - // cluster, e.g., `mysnapshot` of the form: `[_a-zA-Z0-9][-_.a-zA-Z0-9]*` - // rather than + // Required. The ID by which the new snapshot should be referred to within the + // parent cluster, e.g., `mysnapshot` of the form: + // `[_a-zA-Z0-9][-_.a-zA-Z0-9]*` rather than // `projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/mysnapshot`. 
string snapshot_id = 3 [(google.api.field_behavior) = REQUIRED]; @@ -738,8 +657,8 @@ message GetSnapshotRequest { // feature might be changed in backward-incompatible ways and is not recommended // for production use. It is not subject to any SLA or deprecation policy. message ListSnapshotsRequest { - // Required. The unique name of the cluster for which snapshots should be listed. - // Values are of the form + // Required. The unique name of the cluster for which snapshots should be + // listed. Values are of the form // `projects/{project}/instances/{instance}/clusters/{cluster}`. // Use `{cluster} = '-'` to list snapshots for all clusters in an instance, // e.g., `projects/{project}/instances/{instance}/clusters/-`. @@ -829,7 +748,8 @@ message CreateTableFromSnapshotMetadata { google.protobuf.Timestamp finish_time = 3; } -// The request for [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]. +// The request for +// [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]. message CreateBackupRequest { // Required. This must be one of the clusters in the instance in which this // table is located. The backup will be stored in this cluster. Values are @@ -869,7 +789,20 @@ message CreateBackupMetadata { google.protobuf.Timestamp end_time = 4; } -// The request for [UpdateBackup][google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup]. +// The request for +// [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. +message GetBackupRequest { + // Required. Name of the backup. + // Values are of the form + // `projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}`. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "bigtable.googleapis.com/Backup" } + ]; +} + +// The request for +// [UpdateBackup][google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup]. message UpdateBackupRequest { // Required. The backup to update. 
`backup.name`, and the fields to be updated // as specified by `update_mask` are required. Other fields are ignored. @@ -882,38 +815,26 @@ message UpdateBackupRequest { // resource, not to the request message. The field mask must always be // specified; this prevents any future fields from being erased accidentally // by clients that do not know about them. - google.protobuf.FieldMask update_mask = 2 [(google.api.field_behavior) = REQUIRED]; -} - -// The request for [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. -message GetBackupRequest { - // Required. Name of the backup. - // Values are of the form - // `projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}`. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Backup" - } - ]; + google.protobuf.FieldMask update_mask = 2 + [(google.api.field_behavior) = REQUIRED]; } -// The request for [DeleteBackup][google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup]. +// The request for +// [DeleteBackup][google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup]. message DeleteBackupRequest { // Required. Name of the backup to delete. // Values are of the form // `projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}`. string name = 1 [ (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Backup" - } + (google.api.resource_reference) = { type: "bigtable.googleapis.com/Backup" } ]; } -// The request for [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. +// The request for +// [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. message ListBackupsRequest { - // Required. The cluster to list backups from. Values are of the + // Required. The cluster to list backups from. Values are of the // form `projects/{project}/instances/{instance}/clusters/{cluster}`. 
// Use `{cluster} = '-'` to list backups for all clusters in an instance, // e.g., `projects/{project}/instances/{instance}/clusters/-`. @@ -928,7 +849,7 @@ message ListBackupsRequest { // The expression must specify the field name, a comparison operator, // and the value that you want to use for filtering. The value must be a // string, a number, or a boolean. The comparison operator must be - // <, >, <=, >=, !=, =, or :. Colon ':' represents a HAS operator which is + // <, >, <=, >=, !=, =, or :. Colon ‘:’ represents a HAS operator which is // roughly synonymous with equality. Filter rules are case insensitive. // // The fields eligible for filtering are: @@ -959,8 +880,9 @@ message ListBackupsRequest { string filter = 2; // An expression for specifying the sort order of the results of the request. - // The string value should specify one or more fields in [Backup][google.bigtable.admin.v2.Backup]. The full - // syntax is described at https://aip.dev/132#ordering. + // The string value should specify one or more fields in + // [Backup][google.bigtable.admin.v2.Backup]. The full syntax is described at + // https://aip.dev/132#ordering. // // Fields supported are: // * name @@ -985,19 +907,88 @@ message ListBackupsRequest { int32 page_size = 4; // If non-empty, `page_token` should contain a - // [next_page_token][google.bigtable.admin.v2.ListBackupsResponse.next_page_token] from a - // previous [ListBackupsResponse][google.bigtable.admin.v2.ListBackupsResponse] to the same `parent` and with the same - // `filter`. + // [next_page_token][google.bigtable.admin.v2.ListBackupsResponse.next_page_token] + // from a previous + // [ListBackupsResponse][google.bigtable.admin.v2.ListBackupsResponse] to the + // same `parent` and with the same `filter`. string page_token = 5; } -// The response for [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. +// The response for +// [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. 
message ListBackupsResponse { // The list of matching backups. repeated Backup backups = 1; // `next_page_token` can be sent in a subsequent - // [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups] call to fetch more - // of the matching backups. + // [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups] call + // to fetch more of the matching backups. string next_page_token = 2; } + +// The request for +// [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. +message RestoreTableRequest { + // Required. The name of the instance in which to create the restored + // table. This instance must be the parent of the source backup. Values are + // of the form `projects//instances/`. + string parent = 1; + + // Required. The id of the table to create and restore to. This + // table must not already exist. The `table_id` appended to + // `parent` forms the full table name of the form + // `projects//instances//tables/`. + string table_id = 2; + + // Required. The source from which to restore. + oneof source { + // Name of the backup from which to restore. Values are of the form + // `projects//instances//clusters//backups/`. + string backup = 3; + } +} + +// Metadata type for the long-running operation returned by +// [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. +message RestoreTableMetadata { + // Name of the table being created and restored to. + string name = 1; + + // The type of the restore source. + RestoreSourceType source_type = 2; + + // Information about the source used to restore the table, as specified by + // `source` in + // [RestoreTableRequest][google.bigtable.admin.v2.RestoreTableRequest]. + oneof source_info { + BackupInfo backup_info = 3; + } + + // If exists, the name of the long-running operation that will be used to + // track the post-restore optimization process to optimize the performance of + // the restored table. 
The metadata type of the long-running operation is + // [OptimizeRestoreTableMetadata][]. The response type is + // [Empty][google.protobuf.Empty]. This long-running operation may be + // automatically created by the system if applicable after the + // RestoreTable long-running operation completes successfully. This operation + // may not be created if the table is already optimized or the restore was + // not successful. + string optimize_table_operation_name = 4; + + // The progress of the + // [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable] + // operation. + OperationProgress progress = 5; +} + +// Metadata type for the long-running operation used to track the progress +// of optimizations performed on a newly restored table. This long-running +// operation is automatically created by the system after the successful +// completion of a table restore, and cannot be cancelled. +message OptimizeRestoredTableMetadata { + // Name of the restored table being optimized. + string name = 1; + + // The progress of the post-restore optimizations. + OperationProgress progress = 2; +} diff --git a/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py b/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py new file mode 100644 index 000000000..5ca167d87 --- /dev/null +++ b/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py @@ -0,0 +1,3574 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database + +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 +from google.api import client_pb2 as google_dot_api_dot_client__pb2 +from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 +from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 +from google.cloud.bigtable_admin_v2.proto import ( + common_pb2 as google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_common__pb2, +) +from google.cloud.bigtable_admin_v2.proto import ( + table_pb2 as google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2, +) +from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2 +from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2 +from google.longrunning import ( + operations_pb2 as google_dot_longrunning_dot_operations__pb2, +) +from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 +from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 +from google.protobuf import field_mask_pb2 as google_dot_protobuf_dot_field__mask__pb2 +from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name="google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto", + package="google.bigtable.admin.v2", + syntax="proto3", + 
serialized_options=b'\n\034com.google.bigtable.admin.v2B\027BigtableTableAdminProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2\352\002"Google::Cloud::Bigtable::Admin::V2', + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n?google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x31google/cloud/bigtable_admin_v2/proto/common.proto\x1a\x30google/cloud/bigtable_admin_v2/proto/table.proto\x1a\x1egoogle/iam/v1/iam_policy.proto\x1a\x1agoogle/iam/v1/policy.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xfc\x01\n\x12\x43reateTableRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x15\n\x08table_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x33\n\x05table\x18\x03 \x01(\x0b\x32\x1f.google.bigtable.admin.v2.TableB\x03\xe0\x41\x02\x12J\n\x0einitial_splits\x18\x04 \x03(\x0b\x32\x32.google.bigtable.admin.v2.CreateTableRequest.Split\x1a\x14\n\x05Split\x12\x0b\n\x03key\x18\x01 \x01(\x0c"\xb4\x01\n\x1e\x43reateTableFromSnapshotRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x15\n\x08table_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x41\n\x0fsource_snapshot\x18\x03 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Snapshot"\x94\x01\n\x13\x44ropRowRangeRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x18\n\x0erow_key_prefix\x18\x02 \x01(\x0cH\x00\x12$\n\x1a\x64\x65lete_all_data_from_table\x18\x03 
\x01(\x08H\x00\x42\x08\n\x06target"\xa8\x01\n\x11ListTablesRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t"^\n\x12ListTablesResponse\x12/\n\x06tables\x18\x01 \x03(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"z\n\x0fGetTableRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View"I\n\x12\x44\x65leteTableRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table"\xda\x02\n\x1bModifyColumnFamiliesRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12^\n\rmodifications\x18\x02 \x03(\x0b\x32\x42.google.bigtable.admin.v2.ModifyColumnFamiliesRequest.ModificationB\x03\xe0\x41\x02\x1a\xa5\x01\n\x0cModification\x12\n\n\x02id\x18\x01 \x01(\t\x12\x38\n\x06\x63reate\x18\x02 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x38\n\x06update\x18\x03 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x0e\n\x04\x64rop\x18\x04 \x01(\x08H\x00\x42\x05\n\x03mod"V\n\x1fGenerateConsistencyTokenRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table"=\n GenerateConsistencyTokenResponse\x12\x19\n\x11\x63onsistency_token\x18\x01 \x01(\t"n\n\x17\x43heckConsistencyRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x1e\n\x11\x63onsistency_token\x18\x02 \x01(\tB\x03\xe0\x41\x02".\n\x18\x43heckConsistencyResponse\x12\x12\n\nconsistent\x18\x01 \x01(\x08"\xdc\x01\n\x14SnapshotTableRequest\x12\x33\n\x04name\x18\x01 
\x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x38\n\x07\x63luster\x18\x02 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster\x12\x18\n\x0bsnapshot_id\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12&\n\x03ttl\x18\x04 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x13\n\x0b\x64\x65scription\x18\x05 \x01(\t"L\n\x12GetSnapshotRequest\x12\x36\n\x04name\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Snapshot"v\n\x14ListSnapshotsRequest\x12\x37\n\x06parent\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t"g\n\x15ListSnapshotsResponse\x12\x35\n\tsnapshots\x18\x01 \x03(\x0b\x32".google.bigtable.admin.v2.Snapshot\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"O\n\x15\x44\x65leteSnapshotRequest\x12\x36\n\x04name\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Snapshot"\xc4\x01\n\x15SnapshotTableMetadata\x12H\n\x10original_request\x18\x01 \x01(\x0b\x32..google.bigtable.admin.v2.SnapshotTableRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xd8\x01\n\x1f\x43reateTableFromSnapshotMetadata\x12R\n\x10original_request\x18\x01 \x01(\x0b\x32\x38.google.bigtable.admin.v2.CreateTableFromSnapshotRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\x9d\x01\n\x13\x43reateBackupRequest\x12\x37\n\x06parent\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster\x12\x16\n\tbackup_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x35\n\x06\x62\x61\x63kup\x18\x03 \x01(\x0b\x32 .google.bigtable.admin.v2.BackupB\x03\xe0\x41\x02"\x98\x01\n\x14\x43reateBackupMetadata\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0csource_table\x18\x02 \x01(\t\x12.\n\nstart_time\x18\x03 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"H\n\x10GetBackupRequest\x12\x34\n\x04name\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1e\x62igtable.googleapis.com/Backup"\x82\x01\n\x13UpdateBackupRequest\x12\x35\n\x06\x62\x61\x63kup\x18\x01 \x01(\x0b\x32 .google.bigtable.admin.v2.BackupB\x03\xe0\x41\x02\x12\x34\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02"K\n\x13\x44\x65leteBackupRequest\x12\x34\n\x04name\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1e\x62igtable.googleapis.com/Backup"\x96\x01\n\x12ListBackupsRequest\x12\x37\n\x06parent\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster\x12\x0e\n\x06\x66ilter\x18\x02 \x01(\t\x12\x10\n\x08order_by\x18\x03 \x01(\t\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x05 \x01(\t"a\n\x13ListBackupsResponse\x12\x31\n\x07\x62\x61\x63kups\x18\x01 \x03(\x0b\x32 .google.bigtable.admin.v2.Backup\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"S\n\x13RestoreTableRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x10\n\x08table_id\x18\x02 \x01(\t\x12\x10\n\x06\x62\x61\x63kup\x18\x03 \x01(\tH\x00\x42\x08\n\x06source"\x98\x02\n\x14RestoreTableMetadata\x12\x0c\n\x04name\x18\x01 \x01(\t\x12@\n\x0bsource_type\x18\x02 \x01(\x0e\x32+.google.bigtable.admin.v2.RestoreSourceType\x12;\n\x0b\x62\x61\x63kup_info\x18\x03 \x01(\x0b\x32$.google.bigtable.admin.v2.BackupInfoH\x00\x12%\n\x1doptimize_table_operation_name\x18\x04 \x01(\t\x12=\n\x08progress\x18\x05 \x01(\x0b\x32+.google.bigtable.admin.v2.OperationProgressB\r\n\x0bsource_info"l\n\x1dOptimizeRestoredTableMetadata\x12\x0c\n\x04name\x18\x01 \x01(\t\x12=\n\x08progress\x18\x02 
\x01(\x0b\x32+.google.bigtable.admin.v2.OperationProgress2\xc8$\n\x12\x42igtableTableAdmin\x12\xab\x01\n\x0b\x43reateTable\x12,.google.bigtable.admin.v2.CreateTableRequest\x1a\x1f.google.bigtable.admin.v2.Table"M\x82\xd3\xe4\x93\x02/"*/v2/{parent=projects/*/instances/*}/tables:\x01*\xda\x41\x15parent,table_id,table\x12\x8a\x02\n\x17\x43reateTableFromSnapshot\x12\x38.google.bigtable.admin.v2.CreateTableFromSnapshotRequest\x1a\x1d.google.longrunning.Operation"\x95\x01\x82\xd3\xe4\x93\x02\x42"=/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot:\x01*\xda\x41\x1fparent,table_id,source_snapshot\xca\x41(\n\x05Table\x12\x1f\x43reateTableFromSnapshotMetadata\x12\xa4\x01\n\nListTables\x12+.google.bigtable.admin.v2.ListTablesRequest\x1a,.google.bigtable.admin.v2.ListTablesResponse";\x82\xd3\xe4\x93\x02,\x12*/v2/{parent=projects/*/instances/*}/tables\xda\x41\x06parent\x12\x91\x01\n\x08GetTable\x12).google.bigtable.admin.v2.GetTableRequest\x1a\x1f.google.bigtable.admin.v2.Table"9\x82\xd3\xe4\x93\x02,\x12*/v2/{name=projects/*/instances/*/tables/*}\xda\x41\x04name\x12\x8e\x01\n\x0b\x44\x65leteTable\x12,.google.bigtable.admin.v2.DeleteTableRequest\x1a\x16.google.protobuf.Empty"9\x82\xd3\xe4\x93\x02,**/v2/{name=projects/*/instances/*/tables/*}\xda\x41\x04name\x12\xcf\x01\n\x14ModifyColumnFamilies\x12\x35.google.bigtable.admin.v2.ModifyColumnFamiliesRequest\x1a\x1f.google.bigtable.admin.v2.Table"_\x82\xd3\xe4\x93\x02\x44"?/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies:\x01*\xda\x41\x12name,modifications\x12\x99\x01\n\x0c\x44ropRowRange\x12-.google.bigtable.admin.v2.DropRowRangeRequest\x1a\x16.google.protobuf.Empty"B\x82\xd3\xe4\x93\x02<"7/v2/{name=projects/*/instances/*/tables/*}:dropRowRange:\x01*\x12\xe8\x01\n\x18GenerateConsistencyToken\x12\x39.google.bigtable.admin.v2.GenerateConsistencyTokenRequest\x1a:.google.bigtable.admin.v2.GenerateConsistencyTokenResponse"U\x82\xd3\xe4\x93\x02H"C/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken:
\x01*\xda\x41\x04name\x12\xda\x01\n\x10\x43heckConsistency\x12\x31.google.bigtable.admin.v2.CheckConsistencyRequest\x1a\x32.google.bigtable.admin.v2.CheckConsistencyResponse"_\x82\xd3\xe4\x93\x02@";/v2/{name=projects/*/instances/*/tables/*}:checkConsistency:\x01*\xda\x41\x16name,consistency_token\x12\xea\x01\n\rSnapshotTable\x12..google.bigtable.admin.v2.SnapshotTableRequest\x1a\x1d.google.longrunning.Operation"\x89\x01\x82\xd3\xe4\x93\x02\x38"3/v2/{name=projects/*/instances/*/tables/*}:snapshot:\x01*\xda\x41$name,cluster,snapshot_id,description\xca\x41!\n\x08Snapshot\x12\x15SnapshotTableMetadata\x12\xa8\x01\n\x0bGetSnapshot\x12,.google.bigtable.admin.v2.GetSnapshotRequest\x1a".google.bigtable.admin.v2.Snapshot"G\x82\xd3\xe4\x93\x02:\x12\x38/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}\xda\x41\x04name\x12\xbb\x01\n\rListSnapshots\x12..google.bigtable.admin.v2.ListSnapshotsRequest\x1a/.google.bigtable.admin.v2.ListSnapshotsResponse"I\x82\xd3\xe4\x93\x02:\x12\x38/v2/{parent=projects/*/instances/*/clusters/*}/snapshots\xda\x41\x06parent\x12\xa2\x01\n\x0e\x44\x65leteSnapshot\x12/.google.bigtable.admin.v2.DeleteSnapshotRequest\x1a\x16.google.protobuf.Empty"G\x82\xd3\xe4\x93\x02:*8/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}\xda\x41\x04name\x12\xe0\x01\n\x0c\x43reateBackup\x12-.google.bigtable.admin.v2.CreateBackupRequest\x1a\x1d.google.longrunning.Operation"\x81\x01\x82\xd3\xe4\x93\x02@"6/v2/{parent=projects/*/instances/*/clusters/*}/backups:\x06\x62\x61\x63kup\xca\x41\x1e\n\x06\x42\x61\x63kup\x12\x14\x43reateBackupMetadata\xda\x41\x17parent,backup_id,backup\x12\xa0\x01\n\tGetBackup\x12*.google.bigtable.admin.v2.GetBackupRequest\x1a .google.bigtable.admin.v2.Backup"E\x82\xd3\xe4\x93\x02\x38\x12\x36/v2/{name=projects/*/instances/*/clusters/*/backups/*}\xda\x41\x04name\x12\xc3\x01\n\x0cUpdateBackup\x12-.google.bigtable.admin.v2.UpdateBackupRequest\x1a 
.google.bigtable.admin.v2.Backup"b\x82\xd3\xe4\x93\x02G2=/v2/{backup.name=projects/*/instances/*/clusters/*/backups/*}:\x06\x62\x61\x63kup\xda\x41\x12\x62\x61\x63kup,update_mask\x12\x9c\x01\n\x0c\x44\x65leteBackup\x12-.google.bigtable.admin.v2.DeleteBackupRequest\x1a\x16.google.protobuf.Empty"E\x82\xd3\xe4\x93\x02\x38*6/v2/{name=projects/*/instances/*/clusters/*/backups/*}\xda\x41\x04name\x12\xb3\x01\n\x0bListBackups\x12,.google.bigtable.admin.v2.ListBackupsRequest\x1a-.google.bigtable.admin.v2.ListBackupsResponse"G\x82\xd3\xe4\x93\x02\x38\x12\x36/v2/{parent=projects/*/instances/*/clusters/*}/backups\xda\x41\x06parent\x12\xbb\x01\n\x0cRestoreTable\x12-.google.bigtable.admin.v2.RestoreTableRequest\x1a\x1d.google.longrunning.Operation"]\x82\xd3\xe4\x93\x02\x37"2/v2/{parent=projects/*/instances/*}/tables:restore:\x01*\xca\x41\x1d\n\x05Table\x12\x14RestoreTableMetadata\x12\x9c\x01\n\x0cGetIamPolicy\x12".google.iam.v1.GetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"Q\x82\xd3\xe4\x93\x02@";/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy:\x01*\xda\x41\x08resource\x12\xf3\x01\n\x0cSetIamPolicy\x12".google.iam.v1.SetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"\xa7\x01\x82\xd3\xe4\x93\x02\x8e\x01";/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy:\x01*ZL"G/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:setIamPolicy:\x01*\xda\x41\x0fresource,policy\x12\xa4\x02\n\x12TestIamPermissions\x12(.google.iam.v1.TestIamPermissionsRequest\x1a).google.iam.v1.TestIamPermissionsResponse"\xb8\x01\x82\xd3\xe4\x93\x02\x9a\x01"A/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions:\x01*ZR"M/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:testIamPermissions:\x01*\xda\x41\x14resource,permissions\x1a\xde\x02\xca\x41\x1c\x62igtableadmin.googleapis.com\xd2\x41\xbb\x02https://www.googleapis.com/auth/bigtable.admin,https://www.googleapis.com/auth/bigtable.admin.table,https://www.googleapis.com/auth/cloud-bigtable.admin,https://www.googleapis.co
m/auth/cloud-bigtable.admin.table,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-onlyB\xdf\x01\n\x1c\x63om.google.bigtable.admin.v2B\x17\x42igtableTableAdminProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2\xea\x02"Google::Cloud::Bigtable::Admin::V2b\x06proto3', + dependencies=[ + google_dot_api_dot_annotations__pb2.DESCRIPTOR, + google_dot_api_dot_client__pb2.DESCRIPTOR, + google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, + google_dot_api_dot_resource__pb2.DESCRIPTOR, + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_common__pb2.DESCRIPTOR, + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.DESCRIPTOR, + google_dot_iam_dot_v1_dot_iam__policy__pb2.DESCRIPTOR, + google_dot_iam_dot_v1_dot_policy__pb2.DESCRIPTOR, + google_dot_longrunning_dot_operations__pb2.DESCRIPTOR, + google_dot_protobuf_dot_duration__pb2.DESCRIPTOR, + google_dot_protobuf_dot_empty__pb2.DESCRIPTOR, + google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR, + google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, + ], +) + + +_CREATETABLEREQUEST_SPLIT = _descriptor.Descriptor( + name="Split", + full_name="google.bigtable.admin.v2.CreateTableRequest.Split", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="key", + full_name="google.bigtable.admin.v2.CreateTableRequest.Split.key", + index=0, + number=1, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"", + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + 
extension_ranges=[], + oneofs=[], + serialized_start=767, + serialized_end=787, +) + +_CREATETABLEREQUEST = _descriptor.Descriptor( + name="CreateTableRequest", + full_name="google.bigtable.admin.v2.CreateTableRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="parent", + full_name="google.bigtable.admin.v2.CreateTableRequest.parent", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Instance', + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="table_id", + full_name="google.bigtable.admin.v2.CreateTableRequest.table_id", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="table", + full_name="google.bigtable.admin.v2.CreateTableRequest.table", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="initial_splits", + full_name="google.bigtable.admin.v2.CreateTableRequest.initial_splits", + index=3, + number=4, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + 
containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[_CREATETABLEREQUEST_SPLIT,], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=535, + serialized_end=787, +) + + +_CREATETABLEFROMSNAPSHOTREQUEST = _descriptor.Descriptor( + name="CreateTableFromSnapshotRequest", + full_name="google.bigtable.admin.v2.CreateTableFromSnapshotRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="parent", + full_name="google.bigtable.admin.v2.CreateTableFromSnapshotRequest.parent", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Instance', + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="table_id", + full_name="google.bigtable.admin.v2.CreateTableFromSnapshotRequest.table_id", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="source_snapshot", + full_name="google.bigtable.admin.v2.CreateTableFromSnapshotRequest.source_snapshot", + index=2, + number=3, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + 
is_extension=False, + extension_scope=None, + serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Snapshot', + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=790, + serialized_end=970, +) + + +_DROPROWRANGEREQUEST = _descriptor.Descriptor( + name="DropRowRangeRequest", + full_name="google.bigtable.admin.v2.DropRowRangeRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.DropRowRangeRequest.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="row_key_prefix", + full_name="google.bigtable.admin.v2.DropRowRangeRequest.row_key_prefix", + index=1, + number=2, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"", + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="delete_all_data_from_table", + full_name="google.bigtable.admin.v2.DropRowRangeRequest.delete_all_data_from_table", + index=2, + number=3, + type=8, + cpp_type=7, + label=1, + has_default_value=False, + default_value=False, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, 
+ file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name="target", + full_name="google.bigtable.admin.v2.DropRowRangeRequest.target", + index=0, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[], + ), + ], + serialized_start=973, + serialized_end=1121, +) + + +_LISTTABLESREQUEST = _descriptor.Descriptor( + name="ListTablesRequest", + full_name="google.bigtable.admin.v2.ListTablesRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="parent", + full_name="google.bigtable.admin.v2.ListTablesRequest.parent", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Instance', + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="view", + full_name="google.bigtable.admin.v2.ListTablesRequest.view", + index=1, + number=2, + type=14, + cpp_type=8, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="page_size", + full_name="google.bigtable.admin.v2.ListTablesRequest.page_size", + index=2, + number=4, + type=5, + cpp_type=1, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, 
+ serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="page_token", + full_name="google.bigtable.admin.v2.ListTablesRequest.page_token", + index=3, + number=3, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1124, + serialized_end=1292, +) + + +_LISTTABLESRESPONSE = _descriptor.Descriptor( + name="ListTablesResponse", + full_name="google.bigtable.admin.v2.ListTablesResponse", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="tables", + full_name="google.bigtable.admin.v2.ListTablesResponse.tables", + index=0, + number=1, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="next_page_token", + full_name="google.bigtable.admin.v2.ListTablesResponse.next_page_token", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + 
is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1294, + serialized_end=1388, +) + + +_GETTABLEREQUEST = _descriptor.Descriptor( + name="GetTableRequest", + full_name="google.bigtable.admin.v2.GetTableRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.GetTableRequest.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="view", + full_name="google.bigtable.admin.v2.GetTableRequest.view", + index=1, + number=2, + type=14, + cpp_type=8, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1390, + serialized_end=1512, +) + + +_DELETETABLEREQUEST = _descriptor.Descriptor( + name="DeleteTableRequest", + full_name="google.bigtable.admin.v2.DeleteTableRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.DeleteTableRequest.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + 
message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1514, + serialized_end=1587, +) + + +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION = _descriptor.Descriptor( + name="Modification", + full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="id", + full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.id", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="create", + full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.create", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="update", + full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.update", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + 
is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="drop", + full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.drop", + index=3, + number=4, + type=8, + cpp_type=7, + label=1, + has_default_value=False, + default_value=False, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name="mod", + full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.mod", + index=0, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[], + ), + ], + serialized_start=1771, + serialized_end=1936, +) + +_MODIFYCOLUMNFAMILIESREQUEST = _descriptor.Descriptor( + name="ModifyColumnFamiliesRequest", + full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="modifications", + full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.modifications", + index=1, + number=2, + type=11, + 
cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION,], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1590, + serialized_end=1936, +) + + +_GENERATECONSISTENCYTOKENREQUEST = _descriptor.Descriptor( + name="GenerateConsistencyTokenRequest", + full_name="google.bigtable.admin.v2.GenerateConsistencyTokenRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.GenerateConsistencyTokenRequest.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1938, + serialized_end=2024, +) + + +_GENERATECONSISTENCYTOKENRESPONSE = _descriptor.Descriptor( + name="GenerateConsistencyTokenResponse", + full_name="google.bigtable.admin.v2.GenerateConsistencyTokenResponse", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="consistency_token", + 
full_name="google.bigtable.admin.v2.GenerateConsistencyTokenResponse.consistency_token", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=2026, + serialized_end=2087, +) + + +_CHECKCONSISTENCYREQUEST = _descriptor.Descriptor( + name="CheckConsistencyRequest", + full_name="google.bigtable.admin.v2.CheckConsistencyRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.CheckConsistencyRequest.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="consistency_token", + full_name="google.bigtable.admin.v2.CheckConsistencyRequest.consistency_token", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + 
syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=2089, + serialized_end=2199, +) + + +_CHECKCONSISTENCYRESPONSE = _descriptor.Descriptor( + name="CheckConsistencyResponse", + full_name="google.bigtable.admin.v2.CheckConsistencyResponse", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="consistent", + full_name="google.bigtable.admin.v2.CheckConsistencyResponse.consistent", + index=0, + number=1, + type=8, + cpp_type=7, + label=1, + has_default_value=False, + default_value=False, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=2201, + serialized_end=2247, +) + + +_SNAPSHOTTABLEREQUEST = _descriptor.Descriptor( + name="SnapshotTableRequest", + full_name="google.bigtable.admin.v2.SnapshotTableRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.SnapshotTableRequest.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="cluster", + full_name="google.bigtable.admin.v2.SnapshotTableRequest.cluster", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + 
default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002\372A!\n\037bigtable.googleapis.com/Cluster", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="snapshot_id", + full_name="google.bigtable.admin.v2.SnapshotTableRequest.snapshot_id", + index=2, + number=3, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="ttl", + full_name="google.bigtable.admin.v2.SnapshotTableRequest.ttl", + index=3, + number=4, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="description", + full_name="google.bigtable.admin.v2.SnapshotTableRequest.description", + index=4, + number=5, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=2250, + serialized_end=2470, +) + + +_GETSNAPSHOTREQUEST = _descriptor.Descriptor( + name="GetSnapshotRequest", + full_name="google.bigtable.admin.v2.GetSnapshotRequest", + 
filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.GetSnapshotRequest.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Snapshot', + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=2472, + serialized_end=2548, +) + + +_LISTSNAPSHOTSREQUEST = _descriptor.Descriptor( + name="ListSnapshotsRequest", + full_name="google.bigtable.admin.v2.ListSnapshotsRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="parent", + full_name="google.bigtable.admin.v2.ListSnapshotsRequest.parent", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002\372A!\n\037bigtable.googleapis.com/Cluster", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="page_size", + full_name="google.bigtable.admin.v2.ListSnapshotsRequest.page_size", + index=1, + number=2, + type=5, + cpp_type=1, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + 
create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="page_token", + full_name="google.bigtable.admin.v2.ListSnapshotsRequest.page_token", + index=2, + number=3, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=2550, + serialized_end=2668, +) + + +_LISTSNAPSHOTSRESPONSE = _descriptor.Descriptor( + name="ListSnapshotsResponse", + full_name="google.bigtable.admin.v2.ListSnapshotsResponse", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="snapshots", + full_name="google.bigtable.admin.v2.ListSnapshotsResponse.snapshots", + index=0, + number=1, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="next_page_token", + full_name="google.bigtable.admin.v2.ListSnapshotsResponse.next_page_token", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + 
syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=2670, + serialized_end=2773, +) + + +_DELETESNAPSHOTREQUEST = _descriptor.Descriptor( + name="DeleteSnapshotRequest", + full_name="google.bigtable.admin.v2.DeleteSnapshotRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.DeleteSnapshotRequest.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Snapshot', + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=2775, + serialized_end=2854, +) + + +_SNAPSHOTTABLEMETADATA = _descriptor.Descriptor( + name="SnapshotTableMetadata", + full_name="google.bigtable.admin.v2.SnapshotTableMetadata", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="original_request", + full_name="google.bigtable.admin.v2.SnapshotTableMetadata.original_request", + index=0, + number=1, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="request_time", + full_name="google.bigtable.admin.v2.SnapshotTableMetadata.request_time", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + 
has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="finish_time", + full_name="google.bigtable.admin.v2.SnapshotTableMetadata.finish_time", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=2857, + serialized_end=3053, +) + + +_CREATETABLEFROMSNAPSHOTMETADATA = _descriptor.Descriptor( + name="CreateTableFromSnapshotMetadata", + full_name="google.bigtable.admin.v2.CreateTableFromSnapshotMetadata", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="original_request", + full_name="google.bigtable.admin.v2.CreateTableFromSnapshotMetadata.original_request", + index=0, + number=1, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="request_time", + full_name="google.bigtable.admin.v2.CreateTableFromSnapshotMetadata.request_time", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + 
is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="finish_time", + full_name="google.bigtable.admin.v2.CreateTableFromSnapshotMetadata.finish_time", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=3056, + serialized_end=3272, +) + + +_CREATEBACKUPREQUEST = _descriptor.Descriptor( + name="CreateBackupRequest", + full_name="google.bigtable.admin.v2.CreateBackupRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="parent", + full_name="google.bigtable.admin.v2.CreateBackupRequest.parent", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002\372A!\n\037bigtable.googleapis.com/Cluster", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="backup_id", + full_name="google.bigtable.admin.v2.CreateBackupRequest.backup_id", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002", + file=DESCRIPTOR, + 
create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="backup", + full_name="google.bigtable.admin.v2.CreateBackupRequest.backup", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=3275, + serialized_end=3432, +) + + +_CREATEBACKUPMETADATA = _descriptor.Descriptor( + name="CreateBackupMetadata", + full_name="google.bigtable.admin.v2.CreateBackupMetadata", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.CreateBackupMetadata.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="source_table", + full_name="google.bigtable.admin.v2.CreateBackupMetadata.source_table", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="start_time", + full_name="google.bigtable.admin.v2.CreateBackupMetadata.start_time", + index=2, 
+ number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="end_time", + full_name="google.bigtable.admin.v2.CreateBackupMetadata.end_time", + index=3, + number=4, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=3435, + serialized_end=3587, +) + + +_GETBACKUPREQUEST = _descriptor.Descriptor( + name="GetBackupRequest", + full_name="google.bigtable.admin.v2.GetBackupRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.GetBackupRequest.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002\372A \n\036bigtable.googleapis.com/Backup", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=3589, + serialized_end=3661, +) + + +_UPDATEBACKUPREQUEST = _descriptor.Descriptor( + name="UpdateBackupRequest", + 
full_name="google.bigtable.admin.v2.UpdateBackupRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="backup", + full_name="google.bigtable.admin.v2.UpdateBackupRequest.backup", + index=0, + number=1, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="update_mask", + full_name="google.bigtable.admin.v2.UpdateBackupRequest.update_mask", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=3664, + serialized_end=3794, +) + + +_DELETEBACKUPREQUEST = _descriptor.Descriptor( + name="DeleteBackupRequest", + full_name="google.bigtable.admin.v2.DeleteBackupRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.DeleteBackupRequest.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002\372A \n\036bigtable.googleapis.com/Backup", + file=DESCRIPTOR, + 
create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=3796, + serialized_end=3871, +) + + +_LISTBACKUPSREQUEST = _descriptor.Descriptor( + name="ListBackupsRequest", + full_name="google.bigtable.admin.v2.ListBackupsRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="parent", + full_name="google.bigtable.admin.v2.ListBackupsRequest.parent", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002\372A!\n\037bigtable.googleapis.com/Cluster", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="filter", + full_name="google.bigtable.admin.v2.ListBackupsRequest.filter", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="order_by", + full_name="google.bigtable.admin.v2.ListBackupsRequest.order_by", + index=2, + number=3, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="page_size", + 
full_name="google.bigtable.admin.v2.ListBackupsRequest.page_size", + index=3, + number=4, + type=5, + cpp_type=1, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="page_token", + full_name="google.bigtable.admin.v2.ListBackupsRequest.page_token", + index=4, + number=5, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=3874, + serialized_end=4024, +) + + +_LISTBACKUPSRESPONSE = _descriptor.Descriptor( + name="ListBackupsResponse", + full_name="google.bigtable.admin.v2.ListBackupsResponse", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="backups", + full_name="google.bigtable.admin.v2.ListBackupsResponse.backups", + index=0, + number=1, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="next_page_token", + full_name="google.bigtable.admin.v2.ListBackupsResponse.next_page_token", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + 
message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=4026, + serialized_end=4123, +) + + +_RESTORETABLEREQUEST = _descriptor.Descriptor( + name="RestoreTableRequest", + full_name="google.bigtable.admin.v2.RestoreTableRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="parent", + full_name="google.bigtable.admin.v2.RestoreTableRequest.parent", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="table_id", + full_name="google.bigtable.admin.v2.RestoreTableRequest.table_id", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="backup", + full_name="google.bigtable.admin.v2.RestoreTableRequest.backup", + index=2, + number=3, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + 
create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name="source", + full_name="google.bigtable.admin.v2.RestoreTableRequest.source", + index=0, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[], + ), + ], + serialized_start=4125, + serialized_end=4208, +) + + +_RESTORETABLEMETADATA = _descriptor.Descriptor( + name="RestoreTableMetadata", + full_name="google.bigtable.admin.v2.RestoreTableMetadata", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.RestoreTableMetadata.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="source_type", + full_name="google.bigtable.admin.v2.RestoreTableMetadata.source_type", + index=1, + number=2, + type=14, + cpp_type=8, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="backup_info", + full_name="google.bigtable.admin.v2.RestoreTableMetadata.backup_info", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + 
file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="optimize_table_operation_name", + full_name="google.bigtable.admin.v2.RestoreTableMetadata.optimize_table_operation_name", + index=3, + number=4, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="progress", + full_name="google.bigtable.admin.v2.RestoreTableMetadata.progress", + index=4, + number=5, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name="source_info", + full_name="google.bigtable.admin.v2.RestoreTableMetadata.source_info", + index=0, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[], + ), + ], + serialized_start=4211, + serialized_end=4491, +) + + +_OPTIMIZERESTOREDTABLEMETADATA = _descriptor.Descriptor( + name="OptimizeRestoredTableMetadata", + full_name="google.bigtable.admin.v2.OptimizeRestoredTableMetadata", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.OptimizeRestoredTableMetadata.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + 
enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="progress", + full_name="google.bigtable.admin.v2.OptimizeRestoredTableMetadata.progress", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=4493, + serialized_end=4601, +) + +_CREATETABLEREQUEST_SPLIT.containing_type = _CREATETABLEREQUEST +_CREATETABLEREQUEST.fields_by_name[ + "table" +].message_type = ( + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._TABLE +) +_CREATETABLEREQUEST.fields_by_name[ + "initial_splits" +].message_type = _CREATETABLEREQUEST_SPLIT +_DROPROWRANGEREQUEST.oneofs_by_name["target"].fields.append( + _DROPROWRANGEREQUEST.fields_by_name["row_key_prefix"] +) +_DROPROWRANGEREQUEST.fields_by_name[ + "row_key_prefix" +].containing_oneof = _DROPROWRANGEREQUEST.oneofs_by_name["target"] +_DROPROWRANGEREQUEST.oneofs_by_name["target"].fields.append( + _DROPROWRANGEREQUEST.fields_by_name["delete_all_data_from_table"] +) +_DROPROWRANGEREQUEST.fields_by_name[ + "delete_all_data_from_table" +].containing_oneof = _DROPROWRANGEREQUEST.oneofs_by_name["target"] +_LISTTABLESREQUEST.fields_by_name[ + "view" +].enum_type = ( + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._TABLE_VIEW +) +_LISTTABLESRESPONSE.fields_by_name[ + "tables" +].message_type = ( + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._TABLE +) +_GETTABLEREQUEST.fields_by_name[ + 
"view" +].enum_type = ( + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._TABLE_VIEW +) +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name[ + "create" +].message_type = ( + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._COLUMNFAMILY +) +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name[ + "update" +].message_type = ( + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._COLUMNFAMILY +) +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.containing_type = _MODIFYCOLUMNFAMILIESREQUEST +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"].fields.append( + _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name["create"] +) +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name[ + "create" +].containing_oneof = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"] +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"].fields.append( + _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name["update"] +) +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name[ + "update" +].containing_oneof = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"] +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"].fields.append( + _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name["drop"] +) +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name[ + "drop" +].containing_oneof = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"] +_MODIFYCOLUMNFAMILIESREQUEST.fields_by_name[ + "modifications" +].message_type = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION +_SNAPSHOTTABLEREQUEST.fields_by_name[ + "ttl" +].message_type = google_dot_protobuf_dot_duration__pb2._DURATION +_LISTSNAPSHOTSRESPONSE.fields_by_name[ + "snapshots" +].message_type = ( + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._SNAPSHOT +) +_SNAPSHOTTABLEMETADATA.fields_by_name[ + "original_request" +].message_type = _SNAPSHOTTABLEREQUEST +_SNAPSHOTTABLEMETADATA.fields_by_name[ 
+ "request_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_SNAPSHOTTABLEMETADATA.fields_by_name[ + "finish_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_CREATETABLEFROMSNAPSHOTMETADATA.fields_by_name[ + "original_request" +].message_type = _CREATETABLEFROMSNAPSHOTREQUEST +_CREATETABLEFROMSNAPSHOTMETADATA.fields_by_name[ + "request_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_CREATETABLEFROMSNAPSHOTMETADATA.fields_by_name[ + "finish_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_CREATEBACKUPREQUEST.fields_by_name[ + "backup" +].message_type = ( + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._BACKUP +) +_CREATEBACKUPMETADATA.fields_by_name[ + "start_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_CREATEBACKUPMETADATA.fields_by_name[ + "end_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_UPDATEBACKUPREQUEST.fields_by_name[ + "backup" +].message_type = ( + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._BACKUP +) +_UPDATEBACKUPREQUEST.fields_by_name[ + "update_mask" +].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK +_LISTBACKUPSRESPONSE.fields_by_name[ + "backups" +].message_type = ( + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._BACKUP +) +_RESTORETABLEREQUEST.oneofs_by_name["source"].fields.append( + _RESTORETABLEREQUEST.fields_by_name["backup"] +) +_RESTORETABLEREQUEST.fields_by_name[ + "backup" +].containing_oneof = _RESTORETABLEREQUEST.oneofs_by_name["source"] +_RESTORETABLEMETADATA.fields_by_name[ + "source_type" +].enum_type = ( + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._RESTORESOURCETYPE +) +_RESTORETABLEMETADATA.fields_by_name[ + "backup_info" +].message_type = ( + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._BACKUPINFO +) +_RESTORETABLEMETADATA.fields_by_name[ 
+ "progress" +].message_type = ( + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_common__pb2._OPERATIONPROGRESS +) +_RESTORETABLEMETADATA.oneofs_by_name["source_info"].fields.append( + _RESTORETABLEMETADATA.fields_by_name["backup_info"] +) +_RESTORETABLEMETADATA.fields_by_name[ + "backup_info" +].containing_oneof = _RESTORETABLEMETADATA.oneofs_by_name["source_info"] +_OPTIMIZERESTOREDTABLEMETADATA.fields_by_name[ + "progress" +].message_type = ( + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_common__pb2._OPERATIONPROGRESS +) +DESCRIPTOR.message_types_by_name["CreateTableRequest"] = _CREATETABLEREQUEST +DESCRIPTOR.message_types_by_name[ + "CreateTableFromSnapshotRequest" +] = _CREATETABLEFROMSNAPSHOTREQUEST +DESCRIPTOR.message_types_by_name["DropRowRangeRequest"] = _DROPROWRANGEREQUEST +DESCRIPTOR.message_types_by_name["ListTablesRequest"] = _LISTTABLESREQUEST +DESCRIPTOR.message_types_by_name["ListTablesResponse"] = _LISTTABLESRESPONSE +DESCRIPTOR.message_types_by_name["GetTableRequest"] = _GETTABLEREQUEST +DESCRIPTOR.message_types_by_name["DeleteTableRequest"] = _DELETETABLEREQUEST +DESCRIPTOR.message_types_by_name[ + "ModifyColumnFamiliesRequest" +] = _MODIFYCOLUMNFAMILIESREQUEST +DESCRIPTOR.message_types_by_name[ + "GenerateConsistencyTokenRequest" +] = _GENERATECONSISTENCYTOKENREQUEST +DESCRIPTOR.message_types_by_name[ + "GenerateConsistencyTokenResponse" +] = _GENERATECONSISTENCYTOKENRESPONSE +DESCRIPTOR.message_types_by_name["CheckConsistencyRequest"] = _CHECKCONSISTENCYREQUEST +DESCRIPTOR.message_types_by_name["CheckConsistencyResponse"] = _CHECKCONSISTENCYRESPONSE +DESCRIPTOR.message_types_by_name["SnapshotTableRequest"] = _SNAPSHOTTABLEREQUEST +DESCRIPTOR.message_types_by_name["GetSnapshotRequest"] = _GETSNAPSHOTREQUEST +DESCRIPTOR.message_types_by_name["ListSnapshotsRequest"] = _LISTSNAPSHOTSREQUEST +DESCRIPTOR.message_types_by_name["ListSnapshotsResponse"] = _LISTSNAPSHOTSRESPONSE 
+DESCRIPTOR.message_types_by_name["DeleteSnapshotRequest"] = _DELETESNAPSHOTREQUEST +DESCRIPTOR.message_types_by_name["SnapshotTableMetadata"] = _SNAPSHOTTABLEMETADATA +DESCRIPTOR.message_types_by_name[ + "CreateTableFromSnapshotMetadata" +] = _CREATETABLEFROMSNAPSHOTMETADATA +DESCRIPTOR.message_types_by_name["CreateBackupRequest"] = _CREATEBACKUPREQUEST +DESCRIPTOR.message_types_by_name["CreateBackupMetadata"] = _CREATEBACKUPMETADATA +DESCRIPTOR.message_types_by_name["GetBackupRequest"] = _GETBACKUPREQUEST +DESCRIPTOR.message_types_by_name["UpdateBackupRequest"] = _UPDATEBACKUPREQUEST +DESCRIPTOR.message_types_by_name["DeleteBackupRequest"] = _DELETEBACKUPREQUEST +DESCRIPTOR.message_types_by_name["ListBackupsRequest"] = _LISTBACKUPSREQUEST +DESCRIPTOR.message_types_by_name["ListBackupsResponse"] = _LISTBACKUPSRESPONSE +DESCRIPTOR.message_types_by_name["RestoreTableRequest"] = _RESTORETABLEREQUEST +DESCRIPTOR.message_types_by_name["RestoreTableMetadata"] = _RESTORETABLEMETADATA +DESCRIPTOR.message_types_by_name[ + "OptimizeRestoredTableMetadata" +] = _OPTIMIZERESTOREDTABLEMETADATA +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +CreateTableRequest = _reflection.GeneratedProtocolMessageType( + "CreateTableRequest", + (_message.Message,), + { + "Split": _reflection.GeneratedProtocolMessageType( + "Split", + (_message.Message,), + { + "DESCRIPTOR": _CREATETABLEREQUEST_SPLIT, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """An initial split point for a newly created table. + + Attributes: + key: + Row key to use as an initial tablet boundary. 
+ """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableRequest.Split) + }, + ), + "DESCRIPTOR": _CREATETABLEREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.Creat + eTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] + + Attributes: + parent: + Required. The unique name of the instance in which to create + the table. Values are of the form + ``projects/{project}/instances/{instance}``. + table_id: + Required. The name by which the new table should be referred + to within the parent instance, e.g., ``foobar`` rather than + ``{parent}/tables/foobar``. Maximum 50 characters. + table: + Required. The Table to create. + initial_splits: + The optional list of row keys that will be used to initially + split the table into several tablets (tablets are similar to + HBase regions). Given two split keys, ``s1`` and ``s2``, three + tablets will be created, spanning the key ranges: ``[, s1), + [s1, s2), [s2, )``. 
Example: - Row keys := ``["a", "apple", + "custom", "customer_1", "customer_2",`` ``"other", "zz"]`` + - initial_split_keys := ``["apple", "customer_1", + "customer_2", "other"]`` - Key assignment: - Tablet 1 + ``[, apple) => {"a"}.`` - Tablet 2 + ``[apple, customer_1) => {"apple", "custom"}.`` - + Tablet 3 ``[customer_1, customer_2) => {"customer_1"}.`` - + Tablet 4 ``[customer_2, other) => {"customer_2"}.`` - + Tablet 5 ``[other, ) => {"other", "zz"}.`` + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableRequest) + }, +) +_sym_db.RegisterMessage(CreateTableRequest) +_sym_db.RegisterMessage(CreateTableRequest.Split) + +CreateTableFromSnapshotRequest = _reflection.GeneratedProtocolMessageType( + "CreateTableFromSnapshotRequest", + (_message.Message,), + { + "DESCRIPTOR": _CREATETABLEFROMSNAPSHOTREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.Creat + eTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.Create + TableFromSnapshot] Note: This is a private alpha release of Cloud + Bigtable snapshots. This feature is not currently available to most + Cloud Bigtable customers. This feature might be changed in backward- + incompatible ways and is not recommended for production use. It is not + subject to any SLA or deprecation policy. + + Attributes: + parent: + Required. The unique name of the instance in which to create + the table. Values are of the form + ``projects/{project}/instances/{instance}``. + table_id: + Required. The name by which the new table should be referred + to within the parent instance, e.g., ``foobar`` rather than + ``{parent}/tables/foobar``. + source_snapshot: + Required. The unique name of the snapshot from which to + restore the table. The snapshot and the table must be in the + same instance. 
Values are of the form ``projects/{project}/ins + tances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableFromSnapshotRequest) + }, +) +_sym_db.RegisterMessage(CreateTableFromSnapshotRequest) + +DropRowRangeRequest = _reflection.GeneratedProtocolMessageType( + "DropRowRangeRequest", + (_message.Message,), + { + "DESCRIPTOR": _DROPROWRANGEREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.DropR + owRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] + + Attributes: + name: + Required. The unique name of the table on which to drop a + range of rows. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + target: + Delete all rows or by prefix. + row_key_prefix: + Delete all rows that start with this row key prefix. Prefix + cannot be zero length. + delete_all_data_from_table: + Delete all rows in the table. Setting this to false is a no- + op. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DropRowRangeRequest) + }, +) +_sym_db.RegisterMessage(DropRowRangeRequest) + +ListTablesRequest = _reflection.GeneratedProtocolMessageType( + "ListTablesRequest", + (_message.Message,), + { + "DESCRIPTOR": _LISTTABLESREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.ListT + ables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] + + Attributes: + parent: + Required. The unique name of the instance for which tables + should be listed. Values are of the form + ``projects/{project}/instances/{instance}``. + view: + The view to be applied to the returned tables’ fields. Only + NAME_ONLY view (default) and REPLICATION_VIEW are supported. + page_size: + Maximum number of results per page. 
A page_size of zero lets + the server choose the number of items to return. A page_size + which is strictly positive will return at most that many + items. A negative page_size will cause an error. Following + the first request, subsequent paginated calls are not required + to pass a page_size. If a page_size is set in subsequent + calls, it must match the page_size given in the first request. + page_token: + The value of ``next_page_token`` returned by a previous call. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListTablesRequest) + }, +) +_sym_db.RegisterMessage(ListTablesRequest) + +ListTablesResponse = _reflection.GeneratedProtocolMessageType( + "ListTablesResponse", + (_message.Message,), + { + "DESCRIPTOR": _LISTTABLESRESPONSE, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """Response message for [google.bigtable.admin.v2.BigtableTableAdmin.List + Tables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] + + Attributes: + tables: + The tables present in the requested instance. + next_page_token: + Set if not all tables could be returned in a single response. + Pass this value to ``page_token`` in another request to get + the next page of results. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListTablesResponse) + }, +) +_sym_db.RegisterMessage(ListTablesResponse) + +GetTableRequest = _reflection.GeneratedProtocolMessageType( + "GetTableRequest", + (_message.Message,), + { + "DESCRIPTOR": _GETTABLEREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.GetTa + ble][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] + + Attributes: + name: + Required. The unique name of the requested table. Values are + of the form + ``projects/{project}/instances/{instance}/tables/{table}``. 
+ view: + The view to be applied to the returned table’s fields. + Defaults to ``SCHEMA_VIEW`` if unspecified. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetTableRequest) + }, +) +_sym_db.RegisterMessage(GetTableRequest) + +DeleteTableRequest = _reflection.GeneratedProtocolMessageType( + "DeleteTableRequest", + (_message.Message,), + { + "DESCRIPTOR": _DELETETABLEREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.Delet + eTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] + + Attributes: + name: + Required. The unique name of the table to be deleted. Values + are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteTableRequest) + }, +) +_sym_db.RegisterMessage(DeleteTableRequest) + +ModifyColumnFamiliesRequest = _reflection.GeneratedProtocolMessageType( + "ModifyColumnFamiliesRequest", + (_message.Message,), + { + "Modification": _reflection.GeneratedProtocolMessageType( + "Modification", + (_message.Message,), + { + "DESCRIPTOR": _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """A create, update, or delete of a particular column family. + + Attributes: + id: + The ID of the column family to be modified. + mod: + Column familiy modifications. + create: + Create a new column family with the specified schema, or fail + if one already exists with the given ID. + update: + Update an existing column family to the specified schema, or + fail if no column family exists with the given ID. + drop: + Drop (delete) the column family with the given ID, or fail if + no such family exists. 
+ """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification) + }, + ), + "DESCRIPTOR": _MODIFYCOLUMNFAMILIESREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.Modif + yColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyCol + umnFamilies] + + Attributes: + name: + Required. The unique name of the table whose families should + be modified. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + modifications: + Required. Modifications to be atomically applied to the + specified table’s families. Entries are applied in order, + meaning that earlier modifications can be masked by later ones + (in the case of repeated updates to the same family, for + example). + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ModifyColumnFamiliesRequest) + }, +) +_sym_db.RegisterMessage(ModifyColumnFamiliesRequest) +_sym_db.RegisterMessage(ModifyColumnFamiliesRequest.Modification) + +GenerateConsistencyTokenRequest = _reflection.GeneratedProtocolMessageType( + "GenerateConsistencyTokenRequest", + (_message.Message,), + { + "DESCRIPTOR": _GENERATECONSISTENCYTOKENREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.Gener + ateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.Gener + ateConsistencyToken] + + Attributes: + name: + Required. The unique name of the Table for which to create a + consistency token. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. 
+ """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GenerateConsistencyTokenRequest) + }, +) +_sym_db.RegisterMessage(GenerateConsistencyTokenRequest) + +GenerateConsistencyTokenResponse = _reflection.GeneratedProtocolMessageType( + "GenerateConsistencyTokenResponse", + (_message.Message,), + { + "DESCRIPTOR": _GENERATECONSISTENCYTOKENRESPONSE, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """Response message for [google.bigtable.admin.v2.BigtableTableAdmin.Gene + rateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.Gene + rateConsistencyToken] + + Attributes: + consistency_token: + The generated consistency token. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GenerateConsistencyTokenResponse) + }, +) +_sym_db.RegisterMessage(GenerateConsistencyTokenResponse) + +CheckConsistencyRequest = _reflection.GeneratedProtocolMessageType( + "CheckConsistencyRequest", + (_message.Message,), + { + "DESCRIPTOR": _CHECKCONSISTENCYREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.Check + Consistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsiste + ncy] + + Attributes: + name: + Required. The unique name of the Table for which to check + replication consistency. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + consistency_token: + Required. The token created using GenerateConsistencyToken for + the Table. 
+ """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CheckConsistencyRequest) + }, +) +_sym_db.RegisterMessage(CheckConsistencyRequest) + +CheckConsistencyResponse = _reflection.GeneratedProtocolMessageType( + "CheckConsistencyResponse", + (_message.Message,), + { + "DESCRIPTOR": _CHECKCONSISTENCYRESPONSE, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """Response message for [google.bigtable.admin.v2.BigtableTableAdmin.Chec + kConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsist + ency] + + Attributes: + consistent: + True only if the token is consistent. A token is consistent if + replication has caught up with the restrictions specified in + the request. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CheckConsistencyResponse) + }, +) +_sym_db.RegisterMessage(CheckConsistencyResponse) + +SnapshotTableRequest = _reflection.GeneratedProtocolMessageType( + "SnapshotTableRequest", + (_message.Message,), + { + "DESCRIPTOR": _SNAPSHOTTABLEREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.Snaps + hotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable] + Note: This is a private alpha release of Cloud Bigtable snapshots. + This feature is not currently available to most Cloud Bigtable + customers. This feature might be changed in backward-incompatible ways + and is not recommended for production use. It is not subject to any + SLA or deprecation policy. + + Attributes: + name: + Required. The unique name of the table to have the snapshot + taken. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + cluster: + Required. The name of the cluster where the snapshot will be + created in. Values are of the form ``projects/{project}/instan + ces/{instance}/clusters/{cluster}``. + snapshot_id: + Required. 
The ID by which the new snapshot should be referred + to within the parent cluster, e.g., ``mysnapshot`` of the + form: ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` rather than ``projects/{ + project}/instances/{instance}/clusters/{cluster}/snapshots/mys + napshot``. + ttl: + The amount of time that the new snapshot can stay active after + it is created. Once ‘ttl’ expires, the snapshot will get + deleted. The maximum amount of time a snapshot can stay active + is 7 days. If ‘ttl’ is not specified, the default value of 24 + hours will be used. + description: + Description of the snapshot. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.SnapshotTableRequest) + }, +) +_sym_db.RegisterMessage(SnapshotTableRequest) + +GetSnapshotRequest = _reflection.GeneratedProtocolMessageType( + "GetSnapshotRequest", + (_message.Message,), + { + "DESCRIPTOR": _GETSNAPSHOTREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.GetSn + apshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] + Note: This is a private alpha release of Cloud Bigtable snapshots. + This feature is not currently available to most Cloud Bigtable + customers. This feature might be changed in backward-incompatible ways + and is not recommended for production use. It is not subject to any + SLA or deprecation policy. + + Attributes: + name: + Required. The unique name of the requested snapshot. Values + are of the form ``projects/{project}/instances/{instance}/clus + ters/{cluster}/snapshots/{snapshot}``. 
+ """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetSnapshotRequest) + }, +) +_sym_db.RegisterMessage(GetSnapshotRequest) + +ListSnapshotsRequest = _reflection.GeneratedProtocolMessageType( + "ListSnapshotsRequest", + (_message.Message,), + { + "DESCRIPTOR": _LISTSNAPSHOTSREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.ListS + napshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] + Note: This is a private alpha release of Cloud Bigtable snapshots. + This feature is not currently available to most Cloud Bigtable + customers. This feature might be changed in backward-incompatible ways + and is not recommended for production use. It is not subject to any + SLA or deprecation policy. + + Attributes: + parent: + Required. The unique name of the cluster for which snapshots + should be listed. Values are of the form ``projects/{project}/ + instances/{instance}/clusters/{cluster}``. Use ``{cluster} = + '-'`` to list snapshots for all clusters in an instance, e.g., + ``projects/{project}/instances/{instance}/clusters/-``. + page_size: + The maximum number of snapshots to return per page. CURRENTLY + UNIMPLEMENTED AND IGNORED. + page_token: + The value of ``next_page_token`` returned by a previous call. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListSnapshotsRequest) + }, +) +_sym_db.RegisterMessage(ListSnapshotsRequest) + +ListSnapshotsResponse = _reflection.GeneratedProtocolMessageType( + "ListSnapshotsResponse", + (_message.Message,), + { + "DESCRIPTOR": _LISTSNAPSHOTSRESPONSE, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """Response message for [google.bigtable.admin.v2.BigtableTableAdmin.List + Snapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] + Note: This is a private alpha release of Cloud Bigtable snapshots. 
+ This feature is not currently available to most Cloud Bigtable + customers. This feature might be changed in backward-incompatible ways + and is not recommended for production use. It is not subject to any + SLA or deprecation policy. + + Attributes: + snapshots: + The snapshots present in the requested cluster. + next_page_token: + Set if not all snapshots could be returned in a single + response. Pass this value to ``page_token`` in another request + to get the next page of results. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListSnapshotsResponse) + }, +) +_sym_db.RegisterMessage(ListSnapshotsResponse) + +DeleteSnapshotRequest = _reflection.GeneratedProtocolMessageType( + "DeleteSnapshotRequest", + (_message.Message,), + { + "DESCRIPTOR": _DELETESNAPSHOTREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.Delet + eSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] + Note: This is a private alpha release of Cloud Bigtable snapshots. + This feature is not currently available to most Cloud Bigtable + customers. This feature might be changed in backward-incompatible ways + and is not recommended for production use. It is not subject to any + SLA or deprecation policy. + + Attributes: + name: + Required. The unique name of the snapshot to be deleted. + Values are of the form ``projects/{project}/instances/{instanc + e}/clusters/{cluster}/snapshots/{snapshot}``. 
+ """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteSnapshotRequest) + }, +) +_sym_db.RegisterMessage(DeleteSnapshotRequest) + +SnapshotTableMetadata = _reflection.GeneratedProtocolMessageType( + "SnapshotTableMetadata", + (_message.Message,), + { + "DESCRIPTOR": _SNAPSHOTTABLEMETADATA, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """The metadata for the Operation returned by SnapshotTable. Note: This + is a private alpha release of Cloud Bigtable snapshots. This feature + is not currently available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or + deprecation policy. + + Attributes: + original_request: + The request that prompted the initiation of this SnapshotTable + operation. + request_time: + The time at which the original request was received. + finish_time: + The time at which the operation failed or was completed + successfully. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.SnapshotTableMetadata) + }, +) +_sym_db.RegisterMessage(SnapshotTableMetadata) + +CreateTableFromSnapshotMetadata = _reflection.GeneratedProtocolMessageType( + "CreateTableFromSnapshotMetadata", + (_message.Message,), + { + "DESCRIPTOR": _CREATETABLEFROMSNAPSHOTMETADATA, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """The metadata for the Operation returned by CreateTableFromSnapshot. + Note: This is a private alpha release of Cloud Bigtable snapshots. + This feature is not currently available to most Cloud Bigtable + customers. This feature might be changed in backward-incompatible ways + and is not recommended for production use. It is not subject to any + SLA or deprecation policy. + + Attributes: + original_request: + The request that prompted the initiation of this + CreateTableFromSnapshot operation. 
+ request_time: + The time at which the original request was received. + finish_time: + The time at which the operation failed or was completed + successfully. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableFromSnapshotMetadata) + }, +) +_sym_db.RegisterMessage(CreateTableFromSnapshotMetadata) + +CreateBackupRequest = _reflection.GeneratedProtocolMessageType( + "CreateBackupRequest", + (_message.Message,), + { + "DESCRIPTOR": _CREATEBACKUPREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """The request for [CreateBackup][google.bigtable.admin.v2.BigtableTableA + dmin.CreateBackup]. + + Attributes: + parent: + Required. This must be one of the clusters in the instance in + which this table is located. The backup will be stored in this + cluster. Values are of the form ``projects/{project}/instances + /{instance}/clusters/{cluster}``. + backup_id: + Required. The id of the backup to be created. The + ``backup_id`` along with the parent ``parent`` are combined as + {parent}/backups/{backup_id} to create the full backup name, + of the form: ``projects/{project}/instances/{instance}/cluster + s/{cluster}/backups/{backup_id}``. This string must be between + 1 and 50 characters in length and match the regex [_a- + zA-Z0-9][-_.a-zA-Z0-9]*. + backup: + Required. The backup to create. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateBackupRequest) + }, +) +_sym_db.RegisterMessage(CreateBackupRequest) + +CreateBackupMetadata = _reflection.GeneratedProtocolMessageType( + "CreateBackupMetadata", + (_message.Message,), + { + "DESCRIPTOR": _CREATEBACKUPMETADATA, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """Metadata type for the operation returned by [CreateBackup][google.bigt + able.admin.v2.BigtableTableAdmin.CreateBackup]. + + Attributes: + name: + The name of the backup being created. 
+ source_table: + The name of the table the backup is created from. + start_time: + The time at which this operation started. + end_time: + If set, the time at which this operation finished or was + cancelled. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateBackupMetadata) + }, +) +_sym_db.RegisterMessage(CreateBackupMetadata) + +GetBackupRequest = _reflection.GeneratedProtocolMessageType( + "GetBackupRequest", + (_message.Message,), + { + "DESCRIPTOR": _GETBACKUPREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """The request for + [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. + + Attributes: + name: + Required. Name of the backup. Values are of the form ``project + s/{project}/instances/{instance}/clusters/{cluster}/backups/{b + ackup}``. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetBackupRequest) + }, +) +_sym_db.RegisterMessage(GetBackupRequest) + +UpdateBackupRequest = _reflection.GeneratedProtocolMessageType( + "UpdateBackupRequest", + (_message.Message,), + { + "DESCRIPTOR": _UPDATEBACKUPREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """The request for [UpdateBackup][google.bigtable.admin.v2.BigtableTableA + dmin.UpdateBackup]. + + Attributes: + backup: + Required. The backup to update. ``backup.name``, and the + fields to be updated as specified by ``update_mask`` are + required. Other fields are ignored. Update is only supported + for the following fields: \* ``backup.expire_time``. + update_mask: + Required. A mask specifying which fields (e.g. + ``expire_time``) in the Backup resource should be updated. + This mask is relative to the Backup resource, not to the + request message. The field mask must always be specified; this + prevents any future fields from being erased accidentally by + clients that do not know about them. 
+ """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.UpdateBackupRequest) + }, +) +_sym_db.RegisterMessage(UpdateBackupRequest) + +DeleteBackupRequest = _reflection.GeneratedProtocolMessageType( + "DeleteBackupRequest", + (_message.Message,), + { + "DESCRIPTOR": _DELETEBACKUPREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """The request for [DeleteBackup][google.bigtable.admin.v2.BigtableTableA + dmin.DeleteBackup]. + + Attributes: + name: + Required. Name of the backup to delete. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/b + ackups/{backup}``. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteBackupRequest) + }, +) +_sym_db.RegisterMessage(DeleteBackupRequest) + +ListBackupsRequest = _reflection.GeneratedProtocolMessageType( + "ListBackupsRequest", + (_message.Message,), + { + "DESCRIPTOR": _LISTBACKUPSREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """The request for [ListBackups][google.bigtable.admin.v2.BigtableTableAd + min.ListBackups]. + + Attributes: + parent: + Required. The cluster to list backups from. Values are of the + form ``projects/{project}/instances/{instance}/clusters/{clust + er}``. Use ``{cluster} = '-'`` to list backups for all + clusters in an instance, e.g., + ``projects/{project}/instances/{instance}/clusters/-``. + filter: + A filter expression that filters backups listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a boolean. + The comparison operator must be <, >, <=, >=, !=, =, or :. + Colon ‘:’ represents a HAS operator which is roughly + synonymous with equality. Filter rules are case insensitive. 
+ The fields eligible for filtering are: \* ``name`` \* + ``source_table`` \* ``state`` \* ``start_time`` (and values + are of the format YYYY-MM-DDTHH:MM:SSZ) \* ``end_time`` (and + values are of the format YYYY-MM-DDTHH:MM:SSZ) \* + ``expire_time`` (and values are of the format YYYY-MM- + DDTHH:MM:SSZ) \* ``size_bytes`` To filter on multiple + expressions, provide each separate expression within + parentheses. By default, each expression is an AND expression. + However, you can include AND, OR, and NOT expressions + explicitly. Some examples of using filters are: - + ``name:"exact"`` –> The backup’s name is the string “exact”. - + ``name:howl`` –> The backup’s name contains the string “howl”. + - ``source_table:prod`` –> The source_table’s name contains + the string “prod”. - ``state:CREATING`` –> The backup is + pending creation. - ``state:READY`` –> The backup is fully + created and ready for use. - ``(name:howl) AND (start_time < + \"2018-03-28T14:50:00Z\")`` –> The backup name contains the + string “howl” and start_time of the backup is before + 2018-03-28T14:50:00Z. - ``size_bytes > 10000000000`` –> The + backup’s size is greater than 10GB + order_by: + An expression for specifying the sort order of the results of + the request. The string value should specify one or more + fields in [Backup][google.bigtable.admin.v2.Backup]. The full + syntax is described at https://aip.dev/132#ordering. Fields + supported are: \* name \* source_table \* expire_time \* + start_time \* end_time \* size_bytes \* state For example, + “start_time”. The default sorting order is ascending. To + specify descending order for the field, a suffix " desc" + should be appended to the field name. For example, “start_time + desc”. Redundant space characters in the syntax are + insigificant. If order_by is empty, results will be sorted by + ``start_time`` in descending order starting from the most + recently created backup. + page_size: + Number of backups to be returned in the response. 
If 0 or + less, defaults to the server’s maximum allowed page size. + page_token: + If non-empty, ``page_token`` should contain a [next_page_token + ][google.bigtable.admin.v2.ListBackupsResponse.next_page_token + ] from a previous [ListBackupsResponse][google.bigtable.admin. + v2.ListBackupsResponse] to the same ``parent`` and with the + same ``filter``. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListBackupsRequest) + }, +) +_sym_db.RegisterMessage(ListBackupsRequest) + +ListBackupsResponse = _reflection.GeneratedProtocolMessageType( + "ListBackupsResponse", + (_message.Message,), + { + "DESCRIPTOR": _LISTBACKUPSRESPONSE, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """The response for [ListBackups][google.bigtable.admin.v2.BigtableTableA + dmin.ListBackups]. + + Attributes: + backups: + The list of matching backups. + next_page_token: + \ ``next_page_token`` can be sent in a subsequent [ListBackups + ][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups] + call to fetch more of the matching backups. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListBackupsResponse) + }, +) +_sym_db.RegisterMessage(ListBackupsResponse) + +RestoreTableRequest = _reflection.GeneratedProtocolMessageType( + "RestoreTableRequest", + (_message.Message,), + { + "DESCRIPTOR": _RESTORETABLEREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """The request for [RestoreTable][google.bigtable.admin.v2.BigtableTableA + dmin.RestoreTable]. + + Attributes: + parent: + Required. The name of the instance in which to create the + restored table. This instance must be the parent of the source + backup. Values are of the form + ``projects//instances/``. + table_id: + Required. The id of the table to create and restore to. This + table must not already exist. 
The ``table_id`` appended to + ``parent`` forms the full table name of the form + ``projects//instances//tables/``. + source: + Required. The source from which to restore. + backup: + Name of the backup from which to restore. Values are of the + form ``projects//instances//clusters//backups/``. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.RestoreTableRequest) + }, +) +_sym_db.RegisterMessage(RestoreTableRequest) + +RestoreTableMetadata = _reflection.GeneratedProtocolMessageType( + "RestoreTableMetadata", + (_message.Message,), + { + "DESCRIPTOR": _RESTORETABLEMETADATA, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """Metadata type for the long-running operation returned by [RestoreTable + ][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. + + Attributes: + name: + Name of the table being created and restored to. + source_type: + The type of the restore source. + source_info: + Information about the source used to restore the table, as + specified by ``source`` in [RestoreTableRequest][google.bigtab + le.admin.v2.RestoreTableRequest]. + optimize_table_operation_name: + If exists, the name of the long-running operation that will be + used to track the post-restore optimization process to + optimize the performance of the restored table. The metadata + type of the long-running operation is + [OptimizeRestoreTableMetadata][]. The response type is + [Empty][google.protobuf.Empty]. This long-running operation + may be automatically created by the system if applicable after + the RestoreTable long-running operation completes + successfully. This operation may not be created if the table + is already optimized or the restore was not successful. + progress: + The progress of the [RestoreTable][google.bigtable.admin.v2.Bi + gtableTableAdmin.RestoreTable] operation. 
+ """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.RestoreTableMetadata) + }, +) +_sym_db.RegisterMessage(RestoreTableMetadata) + +OptimizeRestoredTableMetadata = _reflection.GeneratedProtocolMessageType( + "OptimizeRestoredTableMetadata", + (_message.Message,), + { + "DESCRIPTOR": _OPTIMIZERESTOREDTABLEMETADATA, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """Metadata type for the long-running operation used to track the + progress of optimizations performed on a newly restored table. This + long-running operation is automatically created by the system after + the successful completion of a table restore, and cannot be cancelled. + + Attributes: + name: + Name of the restored table being optimized. + progress: + The progress of the post-restore optimizations. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.OptimizeRestoredTableMetadata) + }, +) +_sym_db.RegisterMessage(OptimizeRestoredTableMetadata) + + +DESCRIPTOR._options = None +_CREATETABLEREQUEST.fields_by_name["parent"]._options = None +_CREATETABLEREQUEST.fields_by_name["table_id"]._options = None +_CREATETABLEREQUEST.fields_by_name["table"]._options = None +_CREATETABLEFROMSNAPSHOTREQUEST.fields_by_name["parent"]._options = None +_CREATETABLEFROMSNAPSHOTREQUEST.fields_by_name["table_id"]._options = None +_CREATETABLEFROMSNAPSHOTREQUEST.fields_by_name["source_snapshot"]._options = None +_DROPROWRANGEREQUEST.fields_by_name["name"]._options = None +_LISTTABLESREQUEST.fields_by_name["parent"]._options = None +_GETTABLEREQUEST.fields_by_name["name"]._options = None +_DELETETABLEREQUEST.fields_by_name["name"]._options = None +_MODIFYCOLUMNFAMILIESREQUEST.fields_by_name["name"]._options = None +_MODIFYCOLUMNFAMILIESREQUEST.fields_by_name["modifications"]._options = None +_GENERATECONSISTENCYTOKENREQUEST.fields_by_name["name"]._options = None +_CHECKCONSISTENCYREQUEST.fields_by_name["name"]._options = None 
+_CHECKCONSISTENCYREQUEST.fields_by_name["consistency_token"]._options = None +_SNAPSHOTTABLEREQUEST.fields_by_name["name"]._options = None +_SNAPSHOTTABLEREQUEST.fields_by_name["cluster"]._options = None +_SNAPSHOTTABLEREQUEST.fields_by_name["snapshot_id"]._options = None +_GETSNAPSHOTREQUEST.fields_by_name["name"]._options = None +_LISTSNAPSHOTSREQUEST.fields_by_name["parent"]._options = None +_DELETESNAPSHOTREQUEST.fields_by_name["name"]._options = None +_CREATEBACKUPREQUEST.fields_by_name["parent"]._options = None +_CREATEBACKUPREQUEST.fields_by_name["backup_id"]._options = None +_CREATEBACKUPREQUEST.fields_by_name["backup"]._options = None +_GETBACKUPREQUEST.fields_by_name["name"]._options = None +_UPDATEBACKUPREQUEST.fields_by_name["backup"]._options = None +_UPDATEBACKUPREQUEST.fields_by_name["update_mask"]._options = None +_DELETEBACKUPREQUEST.fields_by_name["name"]._options = None +_LISTBACKUPSREQUEST.fields_by_name["parent"]._options = None + +_BIGTABLETABLEADMIN = _descriptor.ServiceDescriptor( + name="BigtableTableAdmin", + full_name="google.bigtable.admin.v2.BigtableTableAdmin", + file=DESCRIPTOR, + index=0, + serialized_options=b"\312A\034bigtableadmin.googleapis.com\322A\273\002https://www.googleapis.com/auth/bigtable.admin,https://www.googleapis.com/auth/bigtable.admin.table,https://www.googleapis.com/auth/cloud-bigtable.admin,https://www.googleapis.com/auth/cloud-bigtable.admin.table,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-only", + create_key=_descriptor._internal_create_key, + serialized_start=4604, + serialized_end=9284, + methods=[ + _descriptor.MethodDescriptor( + name="CreateTable", + full_name="google.bigtable.admin.v2.BigtableTableAdmin.CreateTable", + index=0, + containing_service=None, + input_type=_CREATETABLEREQUEST, + output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._TABLE, + 
serialized_options=b'\202\323\344\223\002/"*/v2/{parent=projects/*/instances/*}/tables:\001*\332A\025parent,table_id,table', + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="CreateTableFromSnapshot", + full_name="google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot", + index=1, + containing_service=None, + input_type=_CREATETABLEFROMSNAPSHOTREQUEST, + output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, + serialized_options=b'\202\323\344\223\002B"=/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot:\001*\332A\037parent,table_id,source_snapshot\312A(\n\005Table\022\037CreateTableFromSnapshotMetadata', + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="ListTables", + full_name="google.bigtable.admin.v2.BigtableTableAdmin.ListTables", + index=2, + containing_service=None, + input_type=_LISTTABLESREQUEST, + output_type=_LISTTABLESRESPONSE, + serialized_options=b"\202\323\344\223\002,\022*/v2/{parent=projects/*/instances/*}/tables\332A\006parent", + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="GetTable", + full_name="google.bigtable.admin.v2.BigtableTableAdmin.GetTable", + index=3, + containing_service=None, + input_type=_GETTABLEREQUEST, + output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._TABLE, + serialized_options=b"\202\323\344\223\002,\022*/v2/{name=projects/*/instances/*/tables/*}\332A\004name", + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="DeleteTable", + full_name="google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable", + index=4, + containing_service=None, + input_type=_DELETETABLEREQUEST, + output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, + serialized_options=b"\202\323\344\223\002,**/v2/{name=projects/*/instances/*/tables/*}\332A\004name", + create_key=_descriptor._internal_create_key, + ), + 
_descriptor.MethodDescriptor( + name="ModifyColumnFamilies", + full_name="google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies", + index=5, + containing_service=None, + input_type=_MODIFYCOLUMNFAMILIESREQUEST, + output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._TABLE, + serialized_options=b'\202\323\344\223\002D"?/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies:\001*\332A\022name,modifications', + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="DropRowRange", + full_name="google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange", + index=6, + containing_service=None, + input_type=_DROPROWRANGEREQUEST, + output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, + serialized_options=b'\202\323\344\223\002<"7/v2/{name=projects/*/instances/*/tables/*}:dropRowRange:\001*', + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="GenerateConsistencyToken", + full_name="google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken", + index=7, + containing_service=None, + input_type=_GENERATECONSISTENCYTOKENREQUEST, + output_type=_GENERATECONSISTENCYTOKENRESPONSE, + serialized_options=b'\202\323\344\223\002H"C/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken:\001*\332A\004name', + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="CheckConsistency", + full_name="google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency", + index=8, + containing_service=None, + input_type=_CHECKCONSISTENCYREQUEST, + output_type=_CHECKCONSISTENCYRESPONSE, + serialized_options=b'\202\323\344\223\002@";/v2/{name=projects/*/instances/*/tables/*}:checkConsistency:\001*\332A\026name,consistency_token', + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="SnapshotTable", + full_name="google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable", + index=9, + 
containing_service=None, + input_type=_SNAPSHOTTABLEREQUEST, + output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, + serialized_options=b'\202\323\344\223\0028"3/v2/{name=projects/*/instances/*/tables/*}:snapshot:\001*\332A$name,cluster,snapshot_id,description\312A!\n\010Snapshot\022\025SnapshotTableMetadata', + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="GetSnapshot", + full_name="google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot", + index=10, + containing_service=None, + input_type=_GETSNAPSHOTREQUEST, + output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._SNAPSHOT, + serialized_options=b"\202\323\344\223\002:\0228/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}\332A\004name", + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="ListSnapshots", + full_name="google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots", + index=11, + containing_service=None, + input_type=_LISTSNAPSHOTSREQUEST, + output_type=_LISTSNAPSHOTSRESPONSE, + serialized_options=b"\202\323\344\223\002:\0228/v2/{parent=projects/*/instances/*/clusters/*}/snapshots\332A\006parent", + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="DeleteSnapshot", + full_name="google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot", + index=12, + containing_service=None, + input_type=_DELETESNAPSHOTREQUEST, + output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, + serialized_options=b"\202\323\344\223\002:*8/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}\332A\004name", + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="CreateBackup", + full_name="google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup", + index=13, + containing_service=None, + input_type=_CREATEBACKUPREQUEST, + output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, + 
serialized_options=b'\202\323\344\223\002@"6/v2/{parent=projects/*/instances/*/clusters/*}/backups:\006backup\312A\036\n\006Backup\022\024CreateBackupMetadata\332A\027parent,backup_id,backup', + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="GetBackup", + full_name="google.bigtable.admin.v2.BigtableTableAdmin.GetBackup", + index=14, + containing_service=None, + input_type=_GETBACKUPREQUEST, + output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._BACKUP, + serialized_options=b"\202\323\344\223\0028\0226/v2/{name=projects/*/instances/*/clusters/*/backups/*}\332A\004name", + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="UpdateBackup", + full_name="google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup", + index=15, + containing_service=None, + input_type=_UPDATEBACKUPREQUEST, + output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._BACKUP, + serialized_options=b"\202\323\344\223\002G2=/v2/{backup.name=projects/*/instances/*/clusters/*/backups/*}:\006backup\332A\022backup,update_mask", + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="DeleteBackup", + full_name="google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup", + index=16, + containing_service=None, + input_type=_DELETEBACKUPREQUEST, + output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, + serialized_options=b"\202\323\344\223\0028*6/v2/{name=projects/*/instances/*/clusters/*/backups/*}\332A\004name", + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="ListBackups", + full_name="google.bigtable.admin.v2.BigtableTableAdmin.ListBackups", + index=17, + containing_service=None, + input_type=_LISTBACKUPSREQUEST, + output_type=_LISTBACKUPSRESPONSE, + serialized_options=b"\202\323\344\223\0028\0226/v2/{parent=projects/*/instances/*/clusters/*}/backups\332A\006parent", + 
create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="RestoreTable", + full_name="google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable", + index=18, + containing_service=None, + input_type=_RESTORETABLEREQUEST, + output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, + serialized_options=b'\202\323\344\223\0027"2/v2/{parent=projects/*/instances/*}/tables:restore:\001*\312A\035\n\005Table\022\024RestoreTableMetadata', + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="GetIamPolicy", + full_name="google.bigtable.admin.v2.BigtableTableAdmin.GetIamPolicy", + index=19, + containing_service=None, + input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._GETIAMPOLICYREQUEST, + output_type=google_dot_iam_dot_v1_dot_policy__pb2._POLICY, + serialized_options=b'\202\323\344\223\002@";/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy:\001*\332A\010resource', + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="SetIamPolicy", + full_name="google.bigtable.admin.v2.BigtableTableAdmin.SetIamPolicy", + index=20, + containing_service=None, + input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._SETIAMPOLICYREQUEST, + output_type=google_dot_iam_dot_v1_dot_policy__pb2._POLICY, + serialized_options=b'\202\323\344\223\002\216\001";/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy:\001*ZL"G/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:setIamPolicy:\001*\332A\017resource,policy', + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="TestIamPermissions", + full_name="google.bigtable.admin.v2.BigtableTableAdmin.TestIamPermissions", + index=21, + containing_service=None, + input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._TESTIAMPERMISSIONSREQUEST, + output_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._TESTIAMPERMISSIONSRESPONSE, + 
serialized_options=b'\202\323\344\223\002\232\001"A/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions:\001*ZR"M/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:testIamPermissions:\001*\332A\024resource,permissions', + create_key=_descriptor._internal_create_key, + ), + ], +) +_sym_db.RegisterServiceDescriptor(_BIGTABLETABLEADMIN) + +DESCRIPTOR.services_by_name["BigtableTableAdmin"] = _BIGTABLETABLEADMIN + +# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py b/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py new file mode 100644 index 000000000..2b8d46e20 --- /dev/null +++ b/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py @@ -0,0 +1,1090 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + +from google.cloud.bigtable_admin_v2.proto import ( + bigtable_table_admin_pb2 as google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2, +) +from google.cloud.bigtable_admin_v2.proto import ( + table_pb2 as google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2, +) +from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2 +from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2 +from google.longrunning import ( + operations_pb2 as google_dot_longrunning_dot_operations__pb2, +) +from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 + + +class BigtableTableAdminStub(object): + """Service for creating, configuring, and deleting Cloud Bigtable tables. + + + Provides access to the table schemas only, not the data stored within + the tables. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.CreateTable = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable", + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.FromString, + ) + self.CreateTableFromSnapshot = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTableFromSnapshot", + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableFromSnapshotRequest.SerializeToString, + response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, + ) + self.ListTables = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/ListTables", + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesResponse.FromString, + ) + self.GetTable = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/GetTable", + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetTableRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.FromString, + ) + self.DeleteTable = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable", + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteTableRequest.SerializeToString, + response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ) + self.ModifyColumnFamilies = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies", + 
request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ModifyColumnFamiliesRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.FromString, + ) + self.DropRowRange = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange", + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DropRowRangeRequest.SerializeToString, + response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ) + self.GenerateConsistencyToken = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/GenerateConsistencyToken", + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenResponse.FromString, + ) + self.CheckConsistency = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/CheckConsistency", + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyResponse.FromString, + ) + self.SnapshotTable = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/SnapshotTable", + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.SnapshotTableRequest.SerializeToString, + response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, + ) + self.GetSnapshot = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/GetSnapshot", + 
request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetSnapshotRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Snapshot.FromString, + ) + self.ListSnapshots = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/ListSnapshots", + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsResponse.FromString, + ) + self.DeleteSnapshot = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSnapshot", + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteSnapshotRequest.SerializeToString, + response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ) + self.CreateBackup = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/CreateBackup", + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateBackupRequest.SerializeToString, + response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, + ) + self.GetBackup = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/GetBackup", + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetBackupRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Backup.FromString, + ) + self.UpdateBackup = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateBackup", + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.UpdateBackupRequest.SerializeToString, + 
response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Backup.FromString, + ) + self.DeleteBackup = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteBackup", + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteBackupRequest.SerializeToString, + response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ) + self.ListBackups = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/ListBackups", + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListBackupsRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListBackupsResponse.FromString, + ) + self.RestoreTable = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/RestoreTable", + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.RestoreTableRequest.SerializeToString, + response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, + ) + self.GetIamPolicy = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/GetIamPolicy", + request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, + ) + self.SetIamPolicy = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/SetIamPolicy", + request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, + ) + self.TestIamPermissions = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/TestIamPermissions", + request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.SerializeToString, + 
response_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString, + ) + + +class BigtableTableAdminServicer(object): + """Service for creating, configuring, and deleting Cloud Bigtable tables. + + + Provides access to the table schemas only, not the data stored within + the tables. + """ + + def CreateTable(self, request, context): + """Creates a new table in the specified instance. + The table can be created with a full set of initial column families, + specified in the request. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def CreateTableFromSnapshot(self, request, context): + """Creates a new table from the specified snapshot. The target table must + not exist. The snapshot and the table must be in the same instance. + + Note: This is a private alpha release of Cloud Bigtable snapshots. This + feature is not currently available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or deprecation + policy. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def ListTables(self, request, context): + """Lists all tables served from a specified instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def GetTable(self, request, context): + """Gets metadata information about the specified table. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def DeleteTable(self, request, context): + """Permanently deletes a specified table and all of its data. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def ModifyColumnFamilies(self, request, context): + """Performs a series of column family modifications on the specified table. + Either all or none of the modifications will occur before this method + returns, but data requests received prior to that point may see a table + where only some modifications have taken effect. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def DropRowRange(self, request, context): + """Permanently drop/delete a row range from a specified table. The request can + specify whether to delete all rows in a table, or only those that match a + particular prefix. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def GenerateConsistencyToken(self, request, context): + """Generates a consistency token for a Table, which can be used in + CheckConsistency to check whether mutations to the table that finished + before this call started have been replicated. The tokens will be available + for 90 days. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def CheckConsistency(self, request, context): + """Checks replication consistency based on a consistency token, that is, if + replication has caught up based on the conditions specified in the token + and the check request. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def SnapshotTable(self, request, context): + """Creates a new snapshot in the specified cluster from the specified + source table. The cluster and the table must be in the same instance. + + Note: This is a private alpha release of Cloud Bigtable snapshots. This + feature is not currently available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or deprecation + policy. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def GetSnapshot(self, request, context): + """Gets metadata information about the specified snapshot. + + Note: This is a private alpha release of Cloud Bigtable snapshots. This + feature is not currently available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or deprecation + policy. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def ListSnapshots(self, request, context): + """Lists all snapshots associated with the specified cluster. + + Note: This is a private alpha release of Cloud Bigtable snapshots. This + feature is not currently available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or deprecation + policy. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def DeleteSnapshot(self, request, context): + """Permanently deletes the specified snapshot. + + Note: This is a private alpha release of Cloud Bigtable snapshots. This + feature is not currently available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or deprecation + policy. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def CreateBackup(self, request, context): + """Starts creating a new Cloud Bigtable Backup. The returned backup + [long-running operation][google.longrunning.Operation] can be used to + track creation of the backup. The + [metadata][google.longrunning.Operation.metadata] field type is + [CreateBackupMetadata][google.bigtable.admin.v2.CreateBackupMetadata]. The + [response][google.longrunning.Operation.response] field type is + [Backup][google.bigtable.admin.v2.Backup], if successful. Cancelling the + returned operation will stop the creation and delete the backup. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def GetBackup(self, request, context): + """Gets metadata on a pending or completed Cloud Bigtable Backup. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def UpdateBackup(self, request, context): + """Updates a pending or completed Cloud Bigtable Backup. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def DeleteBackup(self, request, context): + """Deletes a pending or completed Cloud Bigtable backup. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def ListBackups(self, request, context): + """Lists Cloud Bigtable backups. Returns both completed and pending + backups. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def RestoreTable(self, request, context): + """Create a new table by restoring from a completed backup. The new table + must be in the same instance as the instance containing the backup. The + returned table [long-running operation][google.longrunning.Operation] can + be used to track the progress of the operation, and to cancel it. The + [metadata][google.longrunning.Operation.metadata] field type is + [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. The + [response][google.longrunning.Operation.response] type is + [Table][google.bigtable.admin.v2.Table], if successful. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def GetIamPolicy(self, request, context): + """Gets the access control policy for a resource. + Returns an empty policy if the resource exists but does not have a policy + set. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def SetIamPolicy(self, request, context): + """Sets the access control policy on a Table or Backup resource. + Replaces any existing policy. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def TestIamPermissions(self, request, context): + """Returns permissions that the caller has on the specified table resource. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + +def add_BigtableTableAdminServicer_to_server(servicer, server): + rpc_method_handlers = { + "CreateTable": grpc.unary_unary_rpc_method_handler( + servicer.CreateTable, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.SerializeToString, + ), + "CreateTableFromSnapshot": grpc.unary_unary_rpc_method_handler( + servicer.CreateTableFromSnapshot, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableFromSnapshotRequest.FromString, + response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ), + "ListTables": grpc.unary_unary_rpc_method_handler( + servicer.ListTables, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesResponse.SerializeToString, + ), + "GetTable": grpc.unary_unary_rpc_method_handler( + servicer.GetTable, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetTableRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.SerializeToString, + ), + "DeleteTable": grpc.unary_unary_rpc_method_handler( + servicer.DeleteTable, + 
request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteTableRequest.FromString, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + "ModifyColumnFamilies": grpc.unary_unary_rpc_method_handler( + servicer.ModifyColumnFamilies, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ModifyColumnFamiliesRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.SerializeToString, + ), + "DropRowRange": grpc.unary_unary_rpc_method_handler( + servicer.DropRowRange, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DropRowRangeRequest.FromString, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + "GenerateConsistencyToken": grpc.unary_unary_rpc_method_handler( + servicer.GenerateConsistencyToken, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenResponse.SerializeToString, + ), + "CheckConsistency": grpc.unary_unary_rpc_method_handler( + servicer.CheckConsistency, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyResponse.SerializeToString, + ), + "SnapshotTable": grpc.unary_unary_rpc_method_handler( + servicer.SnapshotTable, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.SnapshotTableRequest.FromString, + 
response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ), + "GetSnapshot": grpc.unary_unary_rpc_method_handler( + servicer.GetSnapshot, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetSnapshotRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Snapshot.SerializeToString, + ), + "ListSnapshots": grpc.unary_unary_rpc_method_handler( + servicer.ListSnapshots, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsResponse.SerializeToString, + ), + "DeleteSnapshot": grpc.unary_unary_rpc_method_handler( + servicer.DeleteSnapshot, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteSnapshotRequest.FromString, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + "CreateBackup": grpc.unary_unary_rpc_method_handler( + servicer.CreateBackup, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateBackupRequest.FromString, + response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ), + "GetBackup": grpc.unary_unary_rpc_method_handler( + servicer.GetBackup, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetBackupRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Backup.SerializeToString, + ), + "UpdateBackup": grpc.unary_unary_rpc_method_handler( + servicer.UpdateBackup, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.UpdateBackupRequest.FromString, + 
response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Backup.SerializeToString, + ), + "DeleteBackup": grpc.unary_unary_rpc_method_handler( + servicer.DeleteBackup, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteBackupRequest.FromString, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + "ListBackups": grpc.unary_unary_rpc_method_handler( + servicer.ListBackups, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListBackupsRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListBackupsResponse.SerializeToString, + ), + "RestoreTable": grpc.unary_unary_rpc_method_handler( + servicer.RestoreTable, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.RestoreTableRequest.FromString, + response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ), + "GetIamPolicy": grpc.unary_unary_rpc_method_handler( + servicer.GetIamPolicy, + request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.FromString, + response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString, + ), + "SetIamPolicy": grpc.unary_unary_rpc_method_handler( + servicer.SetIamPolicy, + request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.FromString, + response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString, + ), + "TestIamPermissions": grpc.unary_unary_rpc_method_handler( + servicer.TestIamPermissions, + request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.FromString, + response_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.SerializeToString, + ), + } + generic_handler = 
grpc.method_handlers_generic_handler( + "google.bigtable.admin.v2.BigtableTableAdmin", rpc_method_handlers + ) + server.add_generic_rpc_handlers((generic_handler,)) + + +# This class is part of an EXPERIMENTAL API. +class BigtableTableAdmin(object): + """Service for creating, configuring, and deleting Cloud Bigtable tables. + + + Provides access to the table schemas only, not the data stored within + the tables. + """ + + @staticmethod + def CreateTable( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableRequest.SerializeToString, + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def CreateTableFromSnapshot( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTableFromSnapshot", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableFromSnapshotRequest.SerializeToString, + google_dot_longrunning_dot_operations__pb2.Operation.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def ListTables( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, 
+ target, + "/google.bigtable.admin.v2.BigtableTableAdmin/ListTables", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesRequest.SerializeToString, + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesResponse.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def GetTable( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableTableAdmin/GetTable", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetTableRequest.SerializeToString, + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def DeleteTable( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteTableRequest.SerializeToString, + google_dot_protobuf_dot_empty__pb2.Empty.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def ModifyColumnFamilies( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + 
"/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ModifyColumnFamiliesRequest.SerializeToString, + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def DropRowRange( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DropRowRangeRequest.SerializeToString, + google_dot_protobuf_dot_empty__pb2.Empty.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def GenerateConsistencyToken( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableTableAdmin/GenerateConsistencyToken", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenRequest.SerializeToString, + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenResponse.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def CheckConsistency( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return 
grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableTableAdmin/CheckConsistency", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyRequest.SerializeToString, + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyResponse.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def SnapshotTable( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableTableAdmin/SnapshotTable", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.SnapshotTableRequest.SerializeToString, + google_dot_longrunning_dot_operations__pb2.Operation.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def GetSnapshot( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableTableAdmin/GetSnapshot", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetSnapshotRequest.SerializeToString, + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Snapshot.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def ListSnapshots( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return 
grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableTableAdmin/ListSnapshots", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsRequest.SerializeToString, + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsResponse.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def DeleteSnapshot( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSnapshot", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteSnapshotRequest.SerializeToString, + google_dot_protobuf_dot_empty__pb2.Empty.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def CreateBackup( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableTableAdmin/CreateBackup", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateBackupRequest.SerializeToString, + google_dot_longrunning_dot_operations__pb2.Operation.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def GetBackup( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + 
target, + "/google.bigtable.admin.v2.BigtableTableAdmin/GetBackup", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetBackupRequest.SerializeToString, + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Backup.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def UpdateBackup( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateBackup", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.UpdateBackupRequest.SerializeToString, + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Backup.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def DeleteBackup( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteBackup", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteBackupRequest.SerializeToString, + google_dot_protobuf_dot_empty__pb2.Empty.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def ListBackups( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableTableAdmin/ListBackups", 
+ google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListBackupsRequest.SerializeToString, + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListBackupsResponse.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def RestoreTable( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableTableAdmin/RestoreTable", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.RestoreTableRequest.SerializeToString, + google_dot_longrunning_dot_operations__pb2.Operation.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def GetIamPolicy( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableTableAdmin/GetIamPolicy", + google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.SerializeToString, + google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def SetIamPolicy( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableTableAdmin/SetIamPolicy", + google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.SerializeToString, + 
google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def TestIamPermissions( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableTableAdmin/TestIamPermissions", + google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.SerializeToString, + google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) diff --git a/google/cloud/bigtable_admin_v2/proto/common_pb2.py b/google/cloud/bigtable_admin_v2/proto/common_pb2.py new file mode 100644 index 000000000..09233cff5 --- /dev/null +++ b/google/cloud/bigtable_admin_v2/proto/common_pb2.py @@ -0,0 +1,188 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/cloud/bigtable_admin_v2/proto/common.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import enum_type_wrapper +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database + +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name="google/cloud/bigtable_admin_v2/proto/common.proto", + package="google.bigtable.admin.v2", + syntax="proto3", + serialized_options=b'\n\034com.google.bigtable.admin.v2B\013CommonProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2\352\002"Google::Cloud::Bigtable::Admin::V2', + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n1google/cloud/bigtable_admin_v2/proto/common.proto\x12\x18google.bigtable.admin.v2\x1a\x1fgoogle/protobuf/timestamp.proto"\x8b\x01\n\x11OperationProgress\x12\x18\n\x10progress_percent\x18\x01 \x01(\x05\x12.\n\nstart_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp*=\n\x0bStorageType\x12\x1c\n\x18STORAGE_TYPE_UNSPECIFIED\x10\x00\x12\x07\n\x03SSD\x10\x01\x12\x07\n\x03HDD\x10\x02\x42\xd3\x01\n\x1c\x63om.google.bigtable.admin.v2B\x0b\x43ommonProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2\xea\x02"Google::Cloud::Bigtable::Admin::V2b\x06proto3', + dependencies=[google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,], +) + +_STORAGETYPE = _descriptor.EnumDescriptor( + name="StorageType", + full_name="google.bigtable.admin.v2.StorageType", + 
filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name="STORAGE_TYPE_UNSPECIFIED", + index=0, + number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.EnumValueDescriptor( + name="SSD", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.EnumValueDescriptor( + name="HDD", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + ], + containing_type=None, + serialized_options=None, + serialized_start=254, + serialized_end=315, +) +_sym_db.RegisterEnumDescriptor(_STORAGETYPE) + +StorageType = enum_type_wrapper.EnumTypeWrapper(_STORAGETYPE) +STORAGE_TYPE_UNSPECIFIED = 0 +SSD = 1 +HDD = 2 + + +_OPERATIONPROGRESS = _descriptor.Descriptor( + name="OperationProgress", + full_name="google.bigtable.admin.v2.OperationProgress", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="progress_percent", + full_name="google.bigtable.admin.v2.OperationProgress.progress_percent", + index=0, + number=1, + type=5, + cpp_type=1, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="start_time", + full_name="google.bigtable.admin.v2.OperationProgress.start_time", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, 
+ ), + _descriptor.FieldDescriptor( + name="end_time", + full_name="google.bigtable.admin.v2.OperationProgress.end_time", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=113, + serialized_end=252, +) + +_OPERATIONPROGRESS.fields_by_name[ + "start_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_OPERATIONPROGRESS.fields_by_name[ + "end_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +DESCRIPTOR.message_types_by_name["OperationProgress"] = _OPERATIONPROGRESS +DESCRIPTOR.enum_types_by_name["StorageType"] = _STORAGETYPE +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +OperationProgress = _reflection.GeneratedProtocolMessageType( + "OperationProgress", + (_message.Message,), + { + "DESCRIPTOR": _OPERATIONPROGRESS, + "__module__": "google.cloud.bigtable_admin_v2.proto.common_pb2", + "__doc__": """Encapsulates progress related information for a Cloud Bigtable long + running operation. + + Attributes: + progress_percent: + Percent completion of the operation. Values are between 0 and + 100 inclusive. + start_time: + Time the request was received. + end_time: + If set, the time at which this operation failed or was + completed successfully. 
+ """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.OperationProgress) + }, +) +_sym_db.RegisterMessage(OperationProgress) + + +DESCRIPTOR._options = None +# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/bigtable_admin_v2/proto/common_pb2_grpc.py b/google/cloud/bigtable_admin_v2/proto/common_pb2_grpc.py new file mode 100644 index 000000000..8a9393943 --- /dev/null +++ b/google/cloud/bigtable_admin_v2/proto/common_pb2_grpc.py @@ -0,0 +1,3 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc diff --git a/google/cloud/bigtable_admin_v2/proto/instance_pb2.py b/google/cloud/bigtable_admin_v2/proto/instance_pb2.py new file mode 100644 index 000000000..e0138e0fb --- /dev/null +++ b/google/cloud/bigtable_admin_v2/proto/instance_pb2.py @@ -0,0 +1,886 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/cloud/bigtable_admin_v2/proto/instance.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database + +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 +from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 +from google.cloud.bigtable_admin_v2.proto import ( + common_pb2 as google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_common__pb2, +) + + +DESCRIPTOR = _descriptor.FileDescriptor( + name="google/cloud/bigtable_admin_v2/proto/instance.proto", + package="google.bigtable.admin.v2", + syntax="proto3", + 
serialized_options=b'\n\034com.google.bigtable.admin.v2B\rInstanceProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2\352\002"Google::Cloud::Bigtable::Admin::V2', + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n3google/cloud/bigtable_admin_v2/proto/instance.proto\x12\x18google.bigtable.admin.v2\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x31google/cloud/bigtable_admin_v2/proto/common.proto"\xdd\x03\n\x08Instance\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x19\n\x0c\x64isplay_name\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x37\n\x05state\x18\x03 \x01(\x0e\x32(.google.bigtable.admin.v2.Instance.State\x12\x35\n\x04type\x18\x04 \x01(\x0e\x32\'.google.bigtable.admin.v2.Instance.Type\x12>\n\x06labels\x18\x05 \x03(\x0b\x32..google.bigtable.admin.v2.Instance.LabelsEntry\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"5\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02"=\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x0e\n\nPRODUCTION\x10\x01\x12\x0f\n\x0b\x44\x45VELOPMENT\x10\x02:N\xea\x41K\n bigtable.googleapis.com/Instance\x12\'projects/{project}/instances/{instance}"\xa7\x03\n\x07\x43luster\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x38\n\x08location\x18\x02 \x01(\tB&\xfa\x41#\n!locations.googleapis.com/Location\x12;\n\x05state\x18\x03 \x01(\x0e\x32\'.google.bigtable.admin.v2.Cluster.StateB\x03\xe0\x41\x03\x12\x18\n\x0bserve_nodes\x18\x04 \x01(\x05\x42\x03\xe0\x41\x02\x12\x43\n\x14\x64\x65\x66\x61ult_storage_type\x18\x05 
\x01(\x0e\x32%.google.bigtable.admin.v2.StorageType"Q\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02\x12\x0c\n\x08RESIZING\x10\x03\x12\x0c\n\x08\x44ISABLED\x10\x04:`\xea\x41]\n\x1f\x62igtable.googleapis.com/Cluster\x12:projects/{project}/instances/{instance}/clusters/{cluster}"\xee\x03\n\nAppProfile\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04\x65tag\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\x12g\n\x1dmulti_cluster_routing_use_any\x18\x05 \x01(\x0b\x32>.google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAnyH\x00\x12[\n\x16single_cluster_routing\x18\x06 \x01(\x0b\x32\x39.google.bigtable.admin.v2.AppProfile.SingleClusterRoutingH\x00\x1a\x1b\n\x19MultiClusterRoutingUseAny\x1aN\n\x14SingleClusterRouting\x12\x12\n\ncluster_id\x18\x01 \x01(\t\x12"\n\x1a\x61llow_transactional_writes\x18\x02 \x01(\x08:j\xea\x41g\n"bigtable.googleapis.com/AppProfile\x12\x41projects/{project}/instances/{instance}/appProfiles/{app_profile}B\x10\n\x0erouting_policyB\xd5\x01\n\x1c\x63om.google.bigtable.admin.v2B\rInstanceProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2\xea\x02"Google::Cloud::Bigtable::Admin::V2b\x06proto3', + dependencies=[ + google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, + google_dot_api_dot_resource__pb2.DESCRIPTOR, + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_common__pb2.DESCRIPTOR, + ], +) + + +_INSTANCE_STATE = _descriptor.EnumDescriptor( + name="State", + full_name="google.bigtable.admin.v2.Instance.State", + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name="STATE_NOT_KNOWN", + index=0, + number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.EnumValueDescriptor( + name="READY", + index=1, + number=1, + 
serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.EnumValueDescriptor( + name="CREATING", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + ], + containing_type=None, + serialized_options=None, + serialized_start=474, + serialized_end=527, +) +_sym_db.RegisterEnumDescriptor(_INSTANCE_STATE) + +_INSTANCE_TYPE = _descriptor.EnumDescriptor( + name="Type", + full_name="google.bigtable.admin.v2.Instance.Type", + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name="TYPE_UNSPECIFIED", + index=0, + number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.EnumValueDescriptor( + name="PRODUCTION", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.EnumValueDescriptor( + name="DEVELOPMENT", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + ], + containing_type=None, + serialized_options=None, + serialized_start=529, + serialized_end=590, +) +_sym_db.RegisterEnumDescriptor(_INSTANCE_TYPE) + +_CLUSTER_STATE = _descriptor.EnumDescriptor( + name="State", + full_name="google.bigtable.admin.v2.Cluster.State", + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name="STATE_NOT_KNOWN", + index=0, + number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.EnumValueDescriptor( + name="READY", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.EnumValueDescriptor( + name="CREATING", + index=2, + number=2, + serialized_options=None, + type=None, + 
create_key=_descriptor._internal_create_key, + ), + _descriptor.EnumValueDescriptor( + name="RESIZING", + index=3, + number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.EnumValueDescriptor( + name="DISABLED", + index=4, + number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + ], + containing_type=None, + serialized_options=None, + serialized_start=917, + serialized_end=998, +) +_sym_db.RegisterEnumDescriptor(_CLUSTER_STATE) + + +_INSTANCE_LABELSENTRY = _descriptor.Descriptor( + name="LabelsEntry", + full_name="google.bigtable.admin.v2.Instance.LabelsEntry", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="key", + full_name="google.bigtable.admin.v2.Instance.LabelsEntry.key", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="value", + full_name="google.bigtable.admin.v2.Instance.LabelsEntry.value", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=b"8\001", + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=427, + serialized_end=472, +) + +_INSTANCE = _descriptor.Descriptor( + name="Instance", + full_name="google.bigtable.admin.v2.Instance", 
+ filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.Instance.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\003", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="display_name", + full_name="google.bigtable.admin.v2.Instance.display_name", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="state", + full_name="google.bigtable.admin.v2.Instance.state", + index=2, + number=3, + type=14, + cpp_type=8, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="type", + full_name="google.bigtable.admin.v2.Instance.type", + index=3, + number=4, + type=14, + cpp_type=8, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="labels", + full_name="google.bigtable.admin.v2.Instance.labels", + index=4, + number=5, + type=11, + cpp_type=10, + label=3, + 
has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[_INSTANCE_LABELSENTRY,], + enum_types=[_INSTANCE_STATE, _INSTANCE_TYPE,], + serialized_options=b"\352AK\n bigtable.googleapis.com/Instance\022'projects/{project}/instances/{instance}", + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=193, + serialized_end=670, +) + + +_CLUSTER = _descriptor.Descriptor( + name="Cluster", + full_name="google.bigtable.admin.v2.Cluster", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.Cluster.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\003", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="location", + full_name="google.bigtable.admin.v2.Cluster.location", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\372A#\n!locations.googleapis.com/Location", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="state", + full_name="google.bigtable.admin.v2.Cluster.state", + index=2, + number=3, + type=14, + cpp_type=8, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + 
containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\003", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="serve_nodes", + full_name="google.bigtable.admin.v2.Cluster.serve_nodes", + index=3, + number=4, + type=5, + cpp_type=1, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="default_storage_type", + full_name="google.bigtable.admin.v2.Cluster.default_storage_type", + index=4, + number=5, + type=14, + cpp_type=8, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[_CLUSTER_STATE,], + serialized_options=b"\352A]\n\037bigtable.googleapis.com/Cluster\022:projects/{project}/instances/{instance}/clusters/{cluster}", + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=673, + serialized_end=1096, +) + + +_APPPROFILE_MULTICLUSTERROUTINGUSEANY = _descriptor.Descriptor( + name="MultiClusterRoutingUseAny", + full_name="google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1360, + serialized_end=1387, +) + +_APPPROFILE_SINGLECLUSTERROUTING = _descriptor.Descriptor( + name="SingleClusterRouting", + 
full_name="google.bigtable.admin.v2.AppProfile.SingleClusterRouting", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="cluster_id", + full_name="google.bigtable.admin.v2.AppProfile.SingleClusterRouting.cluster_id", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="allow_transactional_writes", + full_name="google.bigtable.admin.v2.AppProfile.SingleClusterRouting.allow_transactional_writes", + index=1, + number=2, + type=8, + cpp_type=7, + label=1, + has_default_value=False, + default_value=False, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1389, + serialized_end=1467, +) + +_APPPROFILE = _descriptor.Descriptor( + name="AppProfile", + full_name="google.bigtable.admin.v2.AppProfile", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.AppProfile.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + 
create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="etag", + full_name="google.bigtable.admin.v2.AppProfile.etag", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="description", + full_name="google.bigtable.admin.v2.AppProfile.description", + index=2, + number=3, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="multi_cluster_routing_use_any", + full_name="google.bigtable.admin.v2.AppProfile.multi_cluster_routing_use_any", + index=3, + number=5, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="single_cluster_routing", + full_name="google.bigtable.admin.v2.AppProfile.single_cluster_routing", + index=4, + number=6, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[ + _APPPROFILE_MULTICLUSTERROUTINGUSEANY, + _APPPROFILE_SINGLECLUSTERROUTING, + ], + enum_types=[], + 
serialized_options=b'\352Ag\n"bigtable.googleapis.com/AppProfile\022Aprojects/{project}/instances/{instance}/appProfiles/{app_profile}', + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name="routing_policy", + full_name="google.bigtable.admin.v2.AppProfile.routing_policy", + index=0, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[], + ), + ], + serialized_start=1099, + serialized_end=1593, +) + +_INSTANCE_LABELSENTRY.containing_type = _INSTANCE +_INSTANCE.fields_by_name["state"].enum_type = _INSTANCE_STATE +_INSTANCE.fields_by_name["type"].enum_type = _INSTANCE_TYPE +_INSTANCE.fields_by_name["labels"].message_type = _INSTANCE_LABELSENTRY +_INSTANCE_STATE.containing_type = _INSTANCE +_INSTANCE_TYPE.containing_type = _INSTANCE +_CLUSTER.fields_by_name["state"].enum_type = _CLUSTER_STATE +_CLUSTER.fields_by_name[ + "default_storage_type" +].enum_type = ( + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_common__pb2._STORAGETYPE +) +_CLUSTER_STATE.containing_type = _CLUSTER +_APPPROFILE_MULTICLUSTERROUTINGUSEANY.containing_type = _APPPROFILE +_APPPROFILE_SINGLECLUSTERROUTING.containing_type = _APPPROFILE +_APPPROFILE.fields_by_name[ + "multi_cluster_routing_use_any" +].message_type = _APPPROFILE_MULTICLUSTERROUTINGUSEANY +_APPPROFILE.fields_by_name[ + "single_cluster_routing" +].message_type = _APPPROFILE_SINGLECLUSTERROUTING +_APPPROFILE.oneofs_by_name["routing_policy"].fields.append( + _APPPROFILE.fields_by_name["multi_cluster_routing_use_any"] +) +_APPPROFILE.fields_by_name[ + "multi_cluster_routing_use_any" +].containing_oneof = _APPPROFILE.oneofs_by_name["routing_policy"] +_APPPROFILE.oneofs_by_name["routing_policy"].fields.append( + _APPPROFILE.fields_by_name["single_cluster_routing"] +) +_APPPROFILE.fields_by_name[ + "single_cluster_routing" +].containing_oneof = _APPPROFILE.oneofs_by_name["routing_policy"] +DESCRIPTOR.message_types_by_name["Instance"] = 
_INSTANCE +DESCRIPTOR.message_types_by_name["Cluster"] = _CLUSTER +DESCRIPTOR.message_types_by_name["AppProfile"] = _APPPROFILE +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +Instance = _reflection.GeneratedProtocolMessageType( + "Instance", + (_message.Message,), + { + "LabelsEntry": _reflection.GeneratedProtocolMessageType( + "LabelsEntry", + (_message.Message,), + { + "DESCRIPTOR": _INSTANCE_LABELSENTRY, + "__module__": "google.cloud.bigtable_admin_v2.proto.instance_pb2" + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Instance.LabelsEntry) + }, + ), + "DESCRIPTOR": _INSTANCE, + "__module__": "google.cloud.bigtable_admin_v2.proto.instance_pb2", + "__doc__": """A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and + the resources that serve them. All tables in an instance are served + from all [Clusters][google.bigtable.admin.v2.Cluster] in the instance. + + Attributes: + name: + The unique name of the instance. Values are of the form + ``projects/{project}/instances/[a-z][a-z0-9\\-]+[a-z0-9]``. + display_name: + Required. The descriptive name for this instance as it appears + in UIs. Can be changed at any time, but should be kept + globally unique to avoid confusion. + state: + (\ ``OutputOnly``) The current state of the instance. + type: + The type of the instance. Defaults to ``PRODUCTION``. + labels: + Labels are a flexible and lightweight mechanism for organizing + cloud resources into groups that reflect a customer’s + organizational needs and deployment strategies. They can be + used to filter resources and aggregate metrics. - Label keys + must be between 1 and 63 characters long and must conform + to the regular expression: + ``[\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}``. - Label values + must be between 0 and 63 characters long and must conform + to the regular expression: ``[\p{Ll}\p{Lo}\p{N}_-]{0,63}``. - + No more than 64 labels can be associated with a given + resource. - Keys and values must both be under 128 bytes. 
+ """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Instance) + }, +) +_sym_db.RegisterMessage(Instance) +_sym_db.RegisterMessage(Instance.LabelsEntry) + +Cluster = _reflection.GeneratedProtocolMessageType( + "Cluster", + (_message.Message,), + { + "DESCRIPTOR": _CLUSTER, + "__module__": "google.cloud.bigtable_admin_v2.proto.instance_pb2", + "__doc__": """A resizable group of nodes in a particular cloud location, capable of + serving all [Tables][google.bigtable.admin.v2.Table] in the parent + [Instance][google.bigtable.admin.v2.Instance]. + + Attributes: + name: + The unique name of the cluster. Values are of the form ``proje + cts/{project}/instances/{instance}/clusters/[a-z][-a-z0-9]*``. + location: + (\ ``CreationOnly``) The location where this cluster’s nodes + and storage reside. For best performance, clients should be + located as close as possible to this cluster. Currently only + zones are supported, so values should be of the form + ``projects/{project}/locations/{zone}``. + state: + The current state of the cluster. + serve_nodes: + Required. The number of nodes allocated to this cluster. More + nodes enable higher throughput and more consistent + performance. + default_storage_type: + (\ ``CreationOnly``) The type of storage used by this cluster + to serve its parent instance’s tables, unless explicitly + overridden. 
+ """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Cluster) + }, +) +_sym_db.RegisterMessage(Cluster) + +AppProfile = _reflection.GeneratedProtocolMessageType( + "AppProfile", + (_message.Message,), + { + "MultiClusterRoutingUseAny": _reflection.GeneratedProtocolMessageType( + "MultiClusterRoutingUseAny", + (_message.Message,), + { + "DESCRIPTOR": _APPPROFILE_MULTICLUSTERROUTINGUSEANY, + "__module__": "google.cloud.bigtable_admin_v2.proto.instance_pb2", + "__doc__": """Read/write requests are routed to the nearest cluster in the instance, + and will fail over to the nearest cluster that is available in the + event of transient errors or delays. Clusters in a region are + considered equidistant. Choosing this option sacrifices read-your- + writes consistency to improve availability.""", + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny) + }, + ), + "SingleClusterRouting": _reflection.GeneratedProtocolMessageType( + "SingleClusterRouting", + (_message.Message,), + { + "DESCRIPTOR": _APPPROFILE_SINGLECLUSTERROUTING, + "__module__": "google.cloud.bigtable_admin_v2.proto.instance_pb2", + "__doc__": """Unconditionally routes all read/write requests to a specific cluster. + This option preserves read-your-writes consistency but does not + improve availability. + + Attributes: + cluster_id: + The cluster to which read/write requests should be routed. + allow_transactional_writes: + Whether or not ``CheckAndMutateRow`` and + ``ReadModifyWriteRow`` requests are allowed by this app + profile. It is unsafe to send these requests to the same + table/row/column in multiple clusters. 
+ """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.AppProfile.SingleClusterRouting) + }, + ), + "DESCRIPTOR": _APPPROFILE, + "__module__": "google.cloud.bigtable_admin_v2.proto.instance_pb2", + "__doc__": """A configuration object describing how Cloud Bigtable should treat + traffic from a particular end user application. + + Attributes: + name: + (\ ``OutputOnly``) The unique name of the app profile. Values + are of the form + ``projects//instances//appProfiles/[_a- + zA-Z0-9][-_.a-zA-Z0-9]*``. + etag: + Strongly validated etag for optimistic concurrency control. + Preserve the value returned from ``GetAppProfile`` when + calling ``UpdateAppProfile`` to fail the request if there has + been a modification in the mean time. The ``update_mask`` of + the request need not include ``etag`` for this protection to + apply. See `Wikipedia + `__ and `RFC 7232 + `__ for more + details. + description: + Optional long form description of the use case for this + AppProfile. + routing_policy: + The routing policy for all read/write requests that use this + app profile. A value must be explicitly set. + multi_cluster_routing_use_any: + Use a multi-cluster routing policy. + single_cluster_routing: + Use a single-cluster routing policy. 
+ """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.AppProfile) + }, +) +_sym_db.RegisterMessage(AppProfile) +_sym_db.RegisterMessage(AppProfile.MultiClusterRoutingUseAny) +_sym_db.RegisterMessage(AppProfile.SingleClusterRouting) + + +DESCRIPTOR._options = None +_INSTANCE_LABELSENTRY._options = None +_INSTANCE.fields_by_name["name"]._options = None +_INSTANCE.fields_by_name["display_name"]._options = None +_INSTANCE._options = None +_CLUSTER.fields_by_name["name"]._options = None +_CLUSTER.fields_by_name["location"]._options = None +_CLUSTER.fields_by_name["state"]._options = None +_CLUSTER.fields_by_name["serve_nodes"]._options = None +_CLUSTER._options = None +_APPPROFILE._options = None +# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/bigtable_admin_v2/proto/instance_pb2_grpc.py b/google/cloud/bigtable_admin_v2/proto/instance_pb2_grpc.py new file mode 100644 index 000000000..8a9393943 --- /dev/null +++ b/google/cloud/bigtable_admin_v2/proto/instance_pb2_grpc.py @@ -0,0 +1,3 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc diff --git a/google/cloud/bigtable_admin_v2/proto/table_pb2.py b/google/cloud/bigtable_admin_v2/proto/table_pb2.py new file mode 100644 index 000000000..67238a81e --- /dev/null +++ b/google/cloud/bigtable_admin_v2/proto/table_pb2.py @@ -0,0 +1,1682 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/cloud/bigtable_admin_v2/proto/table.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import enum_type_wrapper +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database + +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 +from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 +from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 +from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name="google/cloud/bigtable_admin_v2/proto/table.proto", + package="google.bigtable.admin.v2", + syntax="proto3", + serialized_options=b'\n\034com.google.bigtable.admin.v2B\nTableProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2\352\002"Google::Cloud::Bigtable::Admin::V2', + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n0google/cloud/bigtable_admin_v2/proto/table.proto\x12\x18google.bigtable.admin.v2\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\x9b\x01\n\x0bRestoreInfo\x12@\n\x0bsource_type\x18\x01 \x01(\x0e\x32+.google.bigtable.admin.v2.RestoreSourceType\x12;\n\x0b\x62\x61\x63kup_info\x18\x02 \x01(\x0b\x32$.google.bigtable.admin.v2.BackupInfoH\x00\x42\r\n\x0bsource_info"\xfb\x07\n\x05Table\x12\x0c\n\x04name\x18\x01 \x01(\t\x12J\n\x0e\x63luster_states\x18\x02 \x03(\x0b\x32\x32.google.bigtable.admin.v2.Table.ClusterStatesEntry\x12L\n\x0f\x63olumn_families\x18\x03 
\x03(\x0b\x32\x33.google.bigtable.admin.v2.Table.ColumnFamiliesEntry\x12I\n\x0bgranularity\x18\x04 \x01(\x0e\x32\x34.google.bigtable.admin.v2.Table.TimestampGranularity\x12;\n\x0crestore_info\x18\x06 \x01(\x0b\x32%.google.bigtable.admin.v2.RestoreInfo\x1a\xf9\x01\n\x0c\x43lusterState\x12X\n\x11replication_state\x18\x01 \x01(\x0e\x32=.google.bigtable.admin.v2.Table.ClusterState.ReplicationState"\x8e\x01\n\x10ReplicationState\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\x10\n\x0cINITIALIZING\x10\x01\x12\x17\n\x13PLANNED_MAINTENANCE\x10\x02\x12\x19\n\x15UNPLANNED_MAINTENANCE\x10\x03\x12\t\n\x05READY\x10\x04\x12\x14\n\x10READY_OPTIMIZING\x10\x05\x1a\x62\n\x12\x43lusterStatesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12;\n\x05value\x18\x02 \x01(\x0b\x32,.google.bigtable.admin.v2.Table.ClusterState:\x02\x38\x01\x1a]\n\x13\x43olumnFamiliesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x35\n\x05value\x18\x02 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamily:\x02\x38\x01"I\n\x14TimestampGranularity\x12%\n!TIMESTAMP_GRANULARITY_UNSPECIFIED\x10\x00\x12\n\n\x06MILLIS\x10\x01"\\\n\x04View\x12\x14\n\x10VIEW_UNSPECIFIED\x10\x00\x12\r\n\tNAME_ONLY\x10\x01\x12\x0f\n\x0bSCHEMA_VIEW\x10\x02\x12\x14\n\x10REPLICATION_VIEW\x10\x03\x12\x08\n\x04\x46ULL\x10\x04:Z\xea\x41W\n\x1d\x62igtable.googleapis.com/Table\x12\x36projects/{project}/instances/{instance}/tables/{table}"A\n\x0c\x43olumnFamily\x12\x31\n\x07gc_rule\x18\x01 \x01(\x0b\x32 .google.bigtable.admin.v2.GcRule"\xd5\x02\n\x06GcRule\x12\x1a\n\x10max_num_versions\x18\x01 \x01(\x05H\x00\x12,\n\x07max_age\x18\x02 \x01(\x0b\x32\x19.google.protobuf.DurationH\x00\x12\x45\n\x0cintersection\x18\x03 \x01(\x0b\x32-.google.bigtable.admin.v2.GcRule.IntersectionH\x00\x12\x37\n\x05union\x18\x04 \x01(\x0b\x32&.google.bigtable.admin.v2.GcRule.UnionH\x00\x1a?\n\x0cIntersection\x12/\n\x05rules\x18\x01 \x03(\x0b\x32 .google.bigtable.admin.v2.GcRule\x1a\x38\n\x05Union\x12/\n\x05rules\x18\x01 \x03(\x0b\x32 
.google.bigtable.admin.v2.GcRuleB\x06\n\x04rule"\xc7\x03\n\x08Snapshot\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x35\n\x0csource_table\x18\x02 \x01(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12\x17\n\x0f\x64\x61ta_size_bytes\x18\x03 \x01(\x03\x12/\n\x0b\x63reate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x64\x65lete_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x37\n\x05state\x18\x06 \x01(\x0e\x32(.google.bigtable.admin.v2.Snapshot.State\x12\x13\n\x0b\x64\x65scription\x18\x07 \x01(\t"5\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02:v\xea\x41s\n bigtable.googleapis.com/Snapshot\x12Oprojects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}"\xd7\x03\n\x06\x42\x61\x63kup\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x1c\n\x0csource_table\x18\x02 \x01(\tB\x06\xe0\x41\x05\xe0\x41\x02\x12\x34\n\x0b\x65xpire_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x02\x12\x33\n\nstart_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x31\n\x08\x65nd_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x17\n\nsize_bytes\x18\x06 \x01(\x03\x42\x03\xe0\x41\x03\x12:\n\x05state\x18\x07 \x01(\x0e\x32&.google.bigtable.admin.v2.Backup.StateB\x03\xe0\x41\x03"7\n\x05State\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\x0c\n\x08\x43REATING\x10\x01\x12\t\n\x05READY\x10\x02:p\xea\x41m\n\x1e\x62igtable.googleapis.com/Backup\x12Kprojects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}"\xa4\x01\n\nBackupInfo\x12\x13\n\x06\x62\x61\x63kup\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x33\n\nstart_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x31\n\x08\x65nd_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x19\n\x0csource_table\x18\x04 
\x01(\tB\x03\xe0\x41\x03*D\n\x11RestoreSourceType\x12#\n\x1fRESTORE_SOURCE_TYPE_UNSPECIFIED\x10\x00\x12\n\n\x06\x42\x41\x43KUP\x10\x01\x42\xd2\x01\n\x1c\x63om.google.bigtable.admin.v2B\nTableProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2\xea\x02"Google::Cloud::Bigtable::Admin::V2b\x06proto3', + dependencies=[ + google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, + google_dot_api_dot_resource__pb2.DESCRIPTOR, + google_dot_protobuf_dot_duration__pb2.DESCRIPTOR, + google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, + ], +) + +_RESTORESOURCETYPE = _descriptor.EnumDescriptor( + name="RestoreSourceType", + full_name="google.bigtable.admin.v2.RestoreSourceType", + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name="RESTORE_SOURCE_TYPE_UNSPECIFIED", + index=0, + number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.EnumValueDescriptor( + name="BACKUP", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + ], + containing_type=None, + serialized_options=None, + serialized_start=2893, + serialized_end=2961, +) +_sym_db.RegisterEnumDescriptor(_RESTORESOURCETYPE) + +RestoreSourceType = enum_type_wrapper.EnumTypeWrapper(_RESTORESOURCETYPE) +RESTORE_SOURCE_TYPE_UNSPECIFIED = 0 +BACKUP = 1 + + +_TABLE_CLUSTERSTATE_REPLICATIONSTATE = _descriptor.EnumDescriptor( + name="ReplicationState", + full_name="google.bigtable.admin.v2.Table.ClusterState.ReplicationState", + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name="STATE_NOT_KNOWN", + index=0, + number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.EnumValueDescriptor( + 
name="INITIALIZING", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.EnumValueDescriptor( + name="PLANNED_MAINTENANCE", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.EnumValueDescriptor( + name="UNPLANNED_MAINTENANCE", + index=3, + number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.EnumValueDescriptor( + name="READY", + index=4, + number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.EnumValueDescriptor( + name="READY_OPTIMIZING", + index=5, + number=5, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + ], + containing_type=None, + serialized_options=None, + serialized_start=783, + serialized_end=925, +) +_sym_db.RegisterEnumDescriptor(_TABLE_CLUSTERSTATE_REPLICATIONSTATE) + +_TABLE_TIMESTAMPGRANULARITY = _descriptor.EnumDescriptor( + name="TimestampGranularity", + full_name="google.bigtable.admin.v2.Table.TimestampGranularity", + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name="TIMESTAMP_GRANULARITY_UNSPECIFIED", + index=0, + number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.EnumValueDescriptor( + name="MILLIS", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + ], + containing_type=None, + serialized_options=None, + serialized_start=1122, + serialized_end=1195, +) +_sym_db.RegisterEnumDescriptor(_TABLE_TIMESTAMPGRANULARITY) + +_TABLE_VIEW = _descriptor.EnumDescriptor( + name="View", + full_name="google.bigtable.admin.v2.Table.View", + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + 
values=[ + _descriptor.EnumValueDescriptor( + name="VIEW_UNSPECIFIED", + index=0, + number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.EnumValueDescriptor( + name="NAME_ONLY", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.EnumValueDescriptor( + name="SCHEMA_VIEW", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.EnumValueDescriptor( + name="REPLICATION_VIEW", + index=3, + number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.EnumValueDescriptor( + name="FULL", + index=4, + number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + ], + containing_type=None, + serialized_options=None, + serialized_start=1197, + serialized_end=1289, +) +_sym_db.RegisterEnumDescriptor(_TABLE_VIEW) + +_SNAPSHOT_STATE = _descriptor.EnumDescriptor( + name="State", + full_name="google.bigtable.admin.v2.Snapshot.State", + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name="STATE_NOT_KNOWN", + index=0, + number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.EnumValueDescriptor( + name="READY", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.EnumValueDescriptor( + name="CREATING", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + ], + containing_type=None, + serialized_options=None, + serialized_start=2077, + serialized_end=2130, +) +_sym_db.RegisterEnumDescriptor(_SNAPSHOT_STATE) + +_BACKUP_STATE = _descriptor.EnumDescriptor( + name="State", + 
full_name="google.bigtable.admin.v2.Backup.State", + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name="STATE_UNSPECIFIED", + index=0, + number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.EnumValueDescriptor( + name="CREATING", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.EnumValueDescriptor( + name="READY", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + ], + containing_type=None, + serialized_options=None, + serialized_start=2555, + serialized_end=2610, +) +_sym_db.RegisterEnumDescriptor(_BACKUP_STATE) + + +_RESTOREINFO = _descriptor.Descriptor( + name="RestoreInfo", + full_name="google.bigtable.admin.v2.RestoreInfo", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="source_type", + full_name="google.bigtable.admin.v2.RestoreInfo.source_type", + index=0, + number=1, + type=14, + cpp_type=8, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="backup_info", + full_name="google.bigtable.admin.v2.RestoreInfo.backup_info", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + 
is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name="source_info", + full_name="google.bigtable.admin.v2.RestoreInfo.source_info", + index=0, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[], + ), + ], + serialized_start=204, + serialized_end=359, +) + + +_TABLE_CLUSTERSTATE = _descriptor.Descriptor( + name="ClusterState", + full_name="google.bigtable.admin.v2.Table.ClusterState", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="replication_state", + full_name="google.bigtable.admin.v2.Table.ClusterState.replication_state", + index=0, + number=1, + type=14, + cpp_type=8, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[_TABLE_CLUSTERSTATE_REPLICATIONSTATE,], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=676, + serialized_end=925, +) + +_TABLE_CLUSTERSTATESENTRY = _descriptor.Descriptor( + name="ClusterStatesEntry", + full_name="google.bigtable.admin.v2.Table.ClusterStatesEntry", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="key", + full_name="google.bigtable.admin.v2.Table.ClusterStatesEntry.key", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + 
), + _descriptor.FieldDescriptor( + name="value", + full_name="google.bigtable.admin.v2.Table.ClusterStatesEntry.value", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=b"8\001", + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=927, + serialized_end=1025, +) + +_TABLE_COLUMNFAMILIESENTRY = _descriptor.Descriptor( + name="ColumnFamiliesEntry", + full_name="google.bigtable.admin.v2.Table.ColumnFamiliesEntry", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="key", + full_name="google.bigtable.admin.v2.Table.ColumnFamiliesEntry.key", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="value", + full_name="google.bigtable.admin.v2.Table.ColumnFamiliesEntry.value", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=b"8\001", + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1027, + 
serialized_end=1120, +) + +_TABLE = _descriptor.Descriptor( + name="Table", + full_name="google.bigtable.admin.v2.Table", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.Table.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="cluster_states", + full_name="google.bigtable.admin.v2.Table.cluster_states", + index=1, + number=2, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="column_families", + full_name="google.bigtable.admin.v2.Table.column_families", + index=2, + number=3, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="granularity", + full_name="google.bigtable.admin.v2.Table.granularity", + index=3, + number=4, + type=14, + cpp_type=8, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + 
name="restore_info", + full_name="google.bigtable.admin.v2.Table.restore_info", + index=4, + number=6, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[ + _TABLE_CLUSTERSTATE, + _TABLE_CLUSTERSTATESENTRY, + _TABLE_COLUMNFAMILIESENTRY, + ], + enum_types=[_TABLE_TIMESTAMPGRANULARITY, _TABLE_VIEW,], + serialized_options=b"\352AW\n\035bigtable.googleapis.com/Table\0226projects/{project}/instances/{instance}/tables/{table}", + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=362, + serialized_end=1381, +) + + +_COLUMNFAMILY = _descriptor.Descriptor( + name="ColumnFamily", + full_name="google.bigtable.admin.v2.ColumnFamily", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="gc_rule", + full_name="google.bigtable.admin.v2.ColumnFamily.gc_rule", + index=0, + number=1, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1383, + serialized_end=1448, +) + + +_GCRULE_INTERSECTION = _descriptor.Descriptor( + name="Intersection", + full_name="google.bigtable.admin.v2.GcRule.Intersection", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + 
name="rules", + full_name="google.bigtable.admin.v2.GcRule.Intersection.rules", + index=0, + number=1, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1663, + serialized_end=1726, +) + +_GCRULE_UNION = _descriptor.Descriptor( + name="Union", + full_name="google.bigtable.admin.v2.GcRule.Union", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="rules", + full_name="google.bigtable.admin.v2.GcRule.Union.rules", + index=0, + number=1, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1728, + serialized_end=1784, +) + +_GCRULE = _descriptor.Descriptor( + name="GcRule", + full_name="google.bigtable.admin.v2.GcRule", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="max_num_versions", + full_name="google.bigtable.admin.v2.GcRule.max_num_versions", + index=0, + number=1, + type=5, + cpp_type=1, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + 
is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="max_age", + full_name="google.bigtable.admin.v2.GcRule.max_age", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="intersection", + full_name="google.bigtable.admin.v2.GcRule.intersection", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="union", + full_name="google.bigtable.admin.v2.GcRule.union", + index=3, + number=4, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[_GCRULE_INTERSECTION, _GCRULE_UNION,], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name="rule", + full_name="google.bigtable.admin.v2.GcRule.rule", + index=0, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[], + ), + ], + serialized_start=1451, + serialized_end=1792, +) + + +_SNAPSHOT = _descriptor.Descriptor( + name="Snapshot", + full_name="google.bigtable.admin.v2.Snapshot", + filename=None, + 
file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.Snapshot.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="source_table", + full_name="google.bigtable.admin.v2.Snapshot.source_table", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="data_size_bytes", + full_name="google.bigtable.admin.v2.Snapshot.data_size_bytes", + index=2, + number=3, + type=3, + cpp_type=2, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="create_time", + full_name="google.bigtable.admin.v2.Snapshot.create_time", + index=3, + number=4, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="delete_time", + full_name="google.bigtable.admin.v2.Snapshot.delete_time", + index=4, + number=5, + type=11, + cpp_type=10, + label=1, + 
has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="state", + full_name="google.bigtable.admin.v2.Snapshot.state", + index=5, + number=6, + type=14, + cpp_type=8, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="description", + full_name="google.bigtable.admin.v2.Snapshot.description", + index=6, + number=7, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[_SNAPSHOT_STATE,], + serialized_options=b"\352As\n bigtable.googleapis.com/Snapshot\022Oprojects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}", + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1795, + serialized_end=2250, +) + + +_BACKUP = _descriptor.Descriptor( + name="Backup", + full_name="google.bigtable.admin.v2.Backup", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.Backup.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + 
extension_scope=None, + serialized_options=b"\340A\003", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="source_table", + full_name="google.bigtable.admin.v2.Backup.source_table", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\005\340A\002", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="expire_time", + full_name="google.bigtable.admin.v2.Backup.expire_time", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="start_time", + full_name="google.bigtable.admin.v2.Backup.start_time", + index=3, + number=4, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\003", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="end_time", + full_name="google.bigtable.admin.v2.Backup.end_time", + index=4, + number=5, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\003", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="size_bytes", + full_name="google.bigtable.admin.v2.Backup.size_bytes", + 
index=5, + number=6, + type=3, + cpp_type=2, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\003", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="state", + full_name="google.bigtable.admin.v2.Backup.state", + index=6, + number=7, + type=14, + cpp_type=8, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\003", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[_BACKUP_STATE,], + serialized_options=b"\352Am\n\036bigtable.googleapis.com/Backup\022Kprojects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}", + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=2253, + serialized_end=2724, +) + + +_BACKUPINFO = _descriptor.Descriptor( + name="BackupInfo", + full_name="google.bigtable.admin.v2.BackupInfo", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="backup", + full_name="google.bigtable.admin.v2.BackupInfo.backup", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\003", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="start_time", + full_name="google.bigtable.admin.v2.BackupInfo.start_time", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + 
message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\003", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="end_time", + full_name="google.bigtable.admin.v2.BackupInfo.end_time", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\003", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="source_table", + full_name="google.bigtable.admin.v2.BackupInfo.source_table", + index=3, + number=4, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\003", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=2727, + serialized_end=2891, +) + +_RESTOREINFO.fields_by_name["source_type"].enum_type = _RESTORESOURCETYPE +_RESTOREINFO.fields_by_name["backup_info"].message_type = _BACKUPINFO +_RESTOREINFO.oneofs_by_name["source_info"].fields.append( + _RESTOREINFO.fields_by_name["backup_info"] +) +_RESTOREINFO.fields_by_name[ + "backup_info" +].containing_oneof = _RESTOREINFO.oneofs_by_name["source_info"] +_TABLE_CLUSTERSTATE.fields_by_name[ + "replication_state" +].enum_type = _TABLE_CLUSTERSTATE_REPLICATIONSTATE +_TABLE_CLUSTERSTATE.containing_type = _TABLE +_TABLE_CLUSTERSTATE_REPLICATIONSTATE.containing_type = _TABLE_CLUSTERSTATE +_TABLE_CLUSTERSTATESENTRY.fields_by_name["value"].message_type = 
_TABLE_CLUSTERSTATE +_TABLE_CLUSTERSTATESENTRY.containing_type = _TABLE +_TABLE_COLUMNFAMILIESENTRY.fields_by_name["value"].message_type = _COLUMNFAMILY +_TABLE_COLUMNFAMILIESENTRY.containing_type = _TABLE +_TABLE.fields_by_name["cluster_states"].message_type = _TABLE_CLUSTERSTATESENTRY +_TABLE.fields_by_name["column_families"].message_type = _TABLE_COLUMNFAMILIESENTRY +_TABLE.fields_by_name["granularity"].enum_type = _TABLE_TIMESTAMPGRANULARITY +_TABLE.fields_by_name["restore_info"].message_type = _RESTOREINFO +_TABLE_TIMESTAMPGRANULARITY.containing_type = _TABLE +_TABLE_VIEW.containing_type = _TABLE +_COLUMNFAMILY.fields_by_name["gc_rule"].message_type = _GCRULE +_GCRULE_INTERSECTION.fields_by_name["rules"].message_type = _GCRULE +_GCRULE_INTERSECTION.containing_type = _GCRULE +_GCRULE_UNION.fields_by_name["rules"].message_type = _GCRULE +_GCRULE_UNION.containing_type = _GCRULE +_GCRULE.fields_by_name[ + "max_age" +].message_type = google_dot_protobuf_dot_duration__pb2._DURATION +_GCRULE.fields_by_name["intersection"].message_type = _GCRULE_INTERSECTION +_GCRULE.fields_by_name["union"].message_type = _GCRULE_UNION +_GCRULE.oneofs_by_name["rule"].fields.append(_GCRULE.fields_by_name["max_num_versions"]) +_GCRULE.fields_by_name["max_num_versions"].containing_oneof = _GCRULE.oneofs_by_name[ + "rule" +] +_GCRULE.oneofs_by_name["rule"].fields.append(_GCRULE.fields_by_name["max_age"]) +_GCRULE.fields_by_name["max_age"].containing_oneof = _GCRULE.oneofs_by_name["rule"] +_GCRULE.oneofs_by_name["rule"].fields.append(_GCRULE.fields_by_name["intersection"]) +_GCRULE.fields_by_name["intersection"].containing_oneof = _GCRULE.oneofs_by_name["rule"] +_GCRULE.oneofs_by_name["rule"].fields.append(_GCRULE.fields_by_name["union"]) +_GCRULE.fields_by_name["union"].containing_oneof = _GCRULE.oneofs_by_name["rule"] +_SNAPSHOT.fields_by_name["source_table"].message_type = _TABLE +_SNAPSHOT.fields_by_name[ + "create_time" +].message_type = 
google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_SNAPSHOT.fields_by_name[ + "delete_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_SNAPSHOT.fields_by_name["state"].enum_type = _SNAPSHOT_STATE +_SNAPSHOT_STATE.containing_type = _SNAPSHOT +_BACKUP.fields_by_name[ + "expire_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_BACKUP.fields_by_name[ + "start_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_BACKUP.fields_by_name[ + "end_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_BACKUP.fields_by_name["state"].enum_type = _BACKUP_STATE +_BACKUP_STATE.containing_type = _BACKUP +_BACKUPINFO.fields_by_name[ + "start_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_BACKUPINFO.fields_by_name[ + "end_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +DESCRIPTOR.message_types_by_name["RestoreInfo"] = _RESTOREINFO +DESCRIPTOR.message_types_by_name["Table"] = _TABLE +DESCRIPTOR.message_types_by_name["ColumnFamily"] = _COLUMNFAMILY +DESCRIPTOR.message_types_by_name["GcRule"] = _GCRULE +DESCRIPTOR.message_types_by_name["Snapshot"] = _SNAPSHOT +DESCRIPTOR.message_types_by_name["Backup"] = _BACKUP +DESCRIPTOR.message_types_by_name["BackupInfo"] = _BACKUPINFO +DESCRIPTOR.enum_types_by_name["RestoreSourceType"] = _RESTORESOURCETYPE +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +RestoreInfo = _reflection.GeneratedProtocolMessageType( + "RestoreInfo", + (_message.Message,), + { + "DESCRIPTOR": _RESTOREINFO, + "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", + "__doc__": """Information about a table restore. + + Attributes: + source_type: + The type of the restore source. + source_info: + Information about the source used to restore the table. + backup_info: + Information about the backup used to restore the table. The + backup may no longer exist. 
+ """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.RestoreInfo) + }, +) +_sym_db.RegisterMessage(RestoreInfo) + +Table = _reflection.GeneratedProtocolMessageType( + "Table", + (_message.Message,), + { + "ClusterState": _reflection.GeneratedProtocolMessageType( + "ClusterState", + (_message.Message,), + { + "DESCRIPTOR": _TABLE_CLUSTERSTATE, + "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", + "__doc__": """The state of a table’s data in a particular cluster. + + Attributes: + replication_state: + Output only. The state of replication for the table in this + cluster. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table.ClusterState) + }, + ), + "ClusterStatesEntry": _reflection.GeneratedProtocolMessageType( + "ClusterStatesEntry", + (_message.Message,), + { + "DESCRIPTOR": _TABLE_CLUSTERSTATESENTRY, + "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2" + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table.ClusterStatesEntry) + }, + ), + "ColumnFamiliesEntry": _reflection.GeneratedProtocolMessageType( + "ColumnFamiliesEntry", + (_message.Message,), + { + "DESCRIPTOR": _TABLE_COLUMNFAMILIESENTRY, + "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2" + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table.ColumnFamiliesEntry) + }, + ), + "DESCRIPTOR": _TABLE, + "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", + "__doc__": """A collection of user data indexed by row, column, and timestamp. Each + table is served using the resources of its parent cluster. + + Attributes: + name: + Output only. The unique name of the table. Values are of the + form ``projects//instances//tables/[_a- + zA-Z0-9][-_.a-zA-Z0-9]*``. Views: ``NAME_ONLY``, + ``SCHEMA_VIEW``, ``REPLICATION_VIEW``, ``FULL`` + cluster_states: + Output only. Map from cluster ID to per-cluster table state. 
+ If it could not be determined whether or not the table has + data in a particular cluster (for example, if its zone is + unavailable), then there will be an entry for the cluster with + UNKNOWN ``replication_status``. Views: ``REPLICATION_VIEW``, + ``FULL`` + column_families: + (\ ``CreationOnly``) The column families configured for this + table, mapped by column family ID. Views: ``SCHEMA_VIEW``, + ``FULL`` + granularity: + (\ ``CreationOnly``) The granularity (i.e. ``MILLIS``) at + which timestamps are stored in this table. Timestamps not + matching the granularity will be rejected. If unspecified at + creation time, the value will be set to ``MILLIS``. Views: + ``SCHEMA_VIEW``, ``FULL``. + restore_info: + Output only. If this table was restored from another data + source (e.g. a backup), this field will be populated with + information about the restore. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table) + }, +) +_sym_db.RegisterMessage(Table) +_sym_db.RegisterMessage(Table.ClusterState) +_sym_db.RegisterMessage(Table.ClusterStatesEntry) +_sym_db.RegisterMessage(Table.ColumnFamiliesEntry) + +ColumnFamily = _reflection.GeneratedProtocolMessageType( + "ColumnFamily", + (_message.Message,), + { + "DESCRIPTOR": _COLUMNFAMILY, + "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", + "__doc__": """A set of columns within a table which share a common configuration. + + Attributes: + gc_rule: + Garbage collection rule specified as a protobuf. Must + serialize to at most 500 bytes. NOTE: Garbage collection + executes opportunistically in the background, and so it’s + possible for reads to return a cell even if it matches the + active GC expression for its family. 
+ """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ColumnFamily) + }, +) +_sym_db.RegisterMessage(ColumnFamily) + +GcRule = _reflection.GeneratedProtocolMessageType( + "GcRule", + (_message.Message,), + { + "Intersection": _reflection.GeneratedProtocolMessageType( + "Intersection", + (_message.Message,), + { + "DESCRIPTOR": _GCRULE_INTERSECTION, + "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", + "__doc__": """A GcRule which deletes cells matching all of the given rules. + + Attributes: + rules: + Only delete cells which would be deleted by every element of + ``rules``. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GcRule.Intersection) + }, + ), + "Union": _reflection.GeneratedProtocolMessageType( + "Union", + (_message.Message,), + { + "DESCRIPTOR": _GCRULE_UNION, + "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", + "__doc__": """A GcRule which deletes cells matching any of the given rules. + + Attributes: + rules: + Delete cells which would be deleted by any element of + ``rules``. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GcRule.Union) + }, + ), + "DESCRIPTOR": _GCRULE, + "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", + "__doc__": """Rule for determining which cells to delete during garbage collection. + + Attributes: + rule: + Garbage collection rules. + max_num_versions: + Delete all cells in a column except the most recent N. + max_age: + Delete cells in a column older than the given age. Values must + be at least one millisecond, and will be truncated to + microsecond granularity. + intersection: + Delete cells that would be deleted by every nested rule. + union: + Delete cells that would be deleted by any nested rule. 
+ """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GcRule) + }, +) +_sym_db.RegisterMessage(GcRule) +_sym_db.RegisterMessage(GcRule.Intersection) +_sym_db.RegisterMessage(GcRule.Union) + +Snapshot = _reflection.GeneratedProtocolMessageType( + "Snapshot", + (_message.Message,), + { + "DESCRIPTOR": _SNAPSHOT, + "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", + "__doc__": """A snapshot of a table at a particular time. A snapshot can be used as + a checkpoint for data restoration or a data source for a new table. + Note: This is a private alpha release of Cloud Bigtable snapshots. + This feature is not currently available to most Cloud Bigtable + customers. This feature might be changed in backward-incompatible ways + and is not recommended for production use. It is not subject to any + SLA or deprecation policy. + + Attributes: + name: + Output only. The unique name of the snapshot. Values are of + the form ``projects//instances//clusters//snapshots/``. + source_table: + Output only. The source table at the time the snapshot was + taken. + data_size_bytes: + Output only. The size of the data in the source table at the + time the snapshot was taken. In some cases, this value may be + computed asynchronously via a background process and a + placeholder of 0 will be used in the meantime. + create_time: + Output only. The time when the snapshot is created. + delete_time: + Output only. The time when the snapshot will be deleted. The + maximum amount of time a snapshot can stay active is 365 days. + If ‘ttl’ is not specified, the default maximum of 365 days + will be used. + state: + Output only. The current state of the snapshot. + description: + Output only. Description of the snapshot. 
+ """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Snapshot) + }, +) +_sym_db.RegisterMessage(Snapshot) + +Backup = _reflection.GeneratedProtocolMessageType( + "Backup", + (_message.Message,), + { + "DESCRIPTOR": _BACKUP, + "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", + "__doc__": """A backup of a Cloud Bigtable table. + + Attributes: + name: + Output only. A globally unique identifier for the backup which + cannot be changed. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/ + backups/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` The final segment of the + name must be between 1 and 50 characters in length. The + backup is stored in the cluster identified by the prefix of + the backup name of the form ``projects/{project}/instances/{in + stance}/clusters/{cluster}``. + source_table: + Required. Immutable. Name of the table from which this backup + was created. This needs to be in the same instance as the + backup. Values are of the form ``projects/{project}/instances/ + {instance}/tables/{source_table}``. + expire_time: + Required. The expiration time of the backup, with microseconds + granularity that must be at least 6 hours and at most 30 days + from the time the request is received. Once the + ``expire_time`` has passed, Cloud Bigtable will delete the + backup and free the resources used by the backup. + start_time: + Output only. ``start_time`` is the time that the backup was + started (i.e. approximately the time the [CreateBackup][google + .bigtable.admin.v2.BigtableTableAdmin.CreateBackup] request is + received). The row data in this backup will be no older than + this timestamp. + end_time: + Output only. ``end_time`` is the time that the backup was + finished. The row data in the backup will be no newer than + this timestamp. + size_bytes: + Output only. Size of the backup in bytes. + state: + Output only. The current state of the backup. 
+ """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Backup) + }, +) +_sym_db.RegisterMessage(Backup) + +BackupInfo = _reflection.GeneratedProtocolMessageType( + "BackupInfo", + (_message.Message,), + { + "DESCRIPTOR": _BACKUPINFO, + "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", + "__doc__": """Information about a backup. + + Attributes: + backup: + Output only. Name of the backup. + start_time: + Output only. The time that the backup was started. Row data in + the backup will be no older than this timestamp. + end_time: + Output only. This time that the backup was finished. Row data + in the backup will be no newer than this timestamp. + source_table: + Output only. Name of the table the backup was created from. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.BackupInfo) + }, +) +_sym_db.RegisterMessage(BackupInfo) + + +DESCRIPTOR._options = None +_TABLE_CLUSTERSTATESENTRY._options = None +_TABLE_COLUMNFAMILIESENTRY._options = None +_TABLE._options = None +_SNAPSHOT._options = None +_BACKUP.fields_by_name["name"]._options = None +_BACKUP.fields_by_name["source_table"]._options = None +_BACKUP.fields_by_name["expire_time"]._options = None +_BACKUP.fields_by_name["start_time"]._options = None +_BACKUP.fields_by_name["end_time"]._options = None +_BACKUP.fields_by_name["size_bytes"]._options = None +_BACKUP.fields_by_name["state"]._options = None +_BACKUP._options = None +_BACKUPINFO.fields_by_name["backup"]._options = None +_BACKUPINFO.fields_by_name["start_time"]._options = None +_BACKUPINFO.fields_by_name["end_time"]._options = None +_BACKUPINFO.fields_by_name["source_table"]._options = None +# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/bigtable_admin_v2/proto/table_pb2_grpc.py b/google/cloud/bigtable_admin_v2/proto/table_pb2_grpc.py new file mode 100644 index 000000000..8a9393943 --- /dev/null +++ b/google/cloud/bigtable_admin_v2/proto/table_pb2_grpc.py @@ -0,0 +1,3 @@ +# 
Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc diff --git a/google/cloud/bigtable_admin_v2/types.py b/google/cloud/bigtable_admin_v2/types.py new file mode 100644 index 000000000..7dbb939d1 --- /dev/null +++ b/google/cloud/bigtable_admin_v2/types.py @@ -0,0 +1,76 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from __future__ import absolute_import +import sys + +from google.api_core.protobuf_helpers import get_messages + +from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2 +from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2 +from google.cloud.bigtable_admin_v2.proto import common_pb2 +from google.cloud.bigtable_admin_v2.proto import instance_pb2 +from google.cloud.bigtable_admin_v2.proto import table_pb2 +from google.iam.v1 import iam_policy_pb2 +from google.iam.v1 import options_pb2 +from google.iam.v1 import policy_pb2 +from google.longrunning import operations_pb2 +from google.protobuf import any_pb2 +from google.protobuf import duration_pb2 +from google.protobuf import empty_pb2 +from google.protobuf import field_mask_pb2 +from google.protobuf import timestamp_pb2 +from google.rpc import status_pb2 +from google.type import expr_pb2 + + +_shared_modules = [ + iam_policy_pb2, + options_pb2, + policy_pb2, + operations_pb2, + any_pb2, + 
duration_pb2, + empty_pb2, + field_mask_pb2, + timestamp_pb2, + status_pb2, + expr_pb2, +] + +_local_modules = [ + bigtable_instance_admin_pb2, + bigtable_table_admin_pb2, + common_pb2, + instance_pb2, + table_pb2, +] + +names = [] + +for module in _shared_modules: # pragma: NO COVER + for name, message in get_messages(module).items(): + setattr(sys.modules[__name__], name, message) + names.append(name) +for module in _local_modules: + for name, message in get_messages(module).items(): + message.__module__ = "google.cloud.bigtable_admin_v2.types" + setattr(sys.modules[__name__], name, message) + names.append(name) + + +__all__ = tuple(sorted(names)) diff --git a/google/cloud/bigtable_v2/__init__.py b/google/cloud/bigtable_v2/__init__.py index 0ab15791b..8c31017cc 100644 --- a/google/cloud/bigtable_v2/__init__.py +++ b/google/cloud/bigtable_v2/__init__.py @@ -1,71 +1,42 @@ # -*- coding: utf-8 -*- - +# # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-# -from .services.bigtable import BigtableClient -from .types.bigtable import CheckAndMutateRowRequest -from .types.bigtable import CheckAndMutateRowResponse -from .types.bigtable import MutateRowRequest -from .types.bigtable import MutateRowResponse -from .types.bigtable import MutateRowsRequest -from .types.bigtable import MutateRowsResponse -from .types.bigtable import ReadModifyWriteRowRequest -from .types.bigtable import ReadModifyWriteRowResponse -from .types.bigtable import ReadRowsRequest -from .types.bigtable import ReadRowsResponse -from .types.bigtable import SampleRowKeysRequest -from .types.bigtable import SampleRowKeysResponse -from .types.data import Cell -from .types.data import Column -from .types.data import ColumnRange -from .types.data import Family -from .types.data import Mutation -from .types.data import ReadModifyWriteRule -from .types.data import Row -from .types.data import RowFilter -from .types.data import RowRange -from .types.data import RowSet -from .types.data import TimestampRange -from .types.data import ValueRange + +from __future__ import absolute_import +import sys +import warnings + +from google.cloud.bigtable_v2 import types +from google.cloud.bigtable_v2.gapic import bigtable_client + + +if sys.version_info[:2] == (2, 7): + message = ( + "A future version of this library will drop support for Python 2.7. 
" + "More details about Python 2 support for Google Cloud Client Libraries " + "can be found at https://cloud.google.com/python/docs/python2-sunset/" + ) + warnings.warn(message, DeprecationWarning) + + +class BigtableClient(bigtable_client.BigtableClient): + __doc__ = bigtable_client.BigtableClient.__doc__ __all__ = ( - "Cell", - "CheckAndMutateRowRequest", - "CheckAndMutateRowResponse", - "Column", - "ColumnRange", - "Family", - "MutateRowRequest", - "MutateRowResponse", - "MutateRowsRequest", - "MutateRowsResponse", - "Mutation", - "ReadModifyWriteRowRequest", - "ReadModifyWriteRowResponse", - "ReadModifyWriteRule", - "ReadRowsRequest", - "ReadRowsResponse", - "Row", - "RowFilter", - "RowRange", - "RowSet", - "SampleRowKeysRequest", - "SampleRowKeysResponse", - "TimestampRange", - "ValueRange", + "types", "BigtableClient", ) diff --git a/google/cloud/bigtable_v2/gapic/__init__.py b/google/cloud/bigtable_v2/gapic/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/google/cloud/bigtable_v2/gapic/bigtable_client.py b/google/cloud/bigtable_v2/gapic/bigtable_client.py new file mode 100644 index 000000000..f02e0048f --- /dev/null +++ b/google/cloud/bigtable_v2/gapic/bigtable_client.py @@ -0,0 +1,771 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Accesses the google.bigtable.v2 Bigtable API.""" + +import pkg_resources +import warnings + +from google.oauth2 import service_account +import google.api_core.client_options +import google.api_core.gapic_v1.client_info +import google.api_core.gapic_v1.config +import google.api_core.gapic_v1.method +import google.api_core.gapic_v1.routing_header +import google.api_core.grpc_helpers +import google.api_core.path_template +import grpc + +from google.cloud.bigtable_v2.gapic import bigtable_client_config +from google.cloud.bigtable_v2.gapic.transports import bigtable_grpc_transport +from google.cloud.bigtable_v2.proto import bigtable_pb2 +from google.cloud.bigtable_v2.proto import bigtable_pb2_grpc +from google.cloud.bigtable_v2.proto import data_pb2 + + +_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( + "google-cloud-bigtable", +).version + + +class BigtableClient(object): + """Service for reading from and writing to existing Bigtable tables.""" + + SERVICE_ADDRESS = "bigtable.googleapis.com:443" + """The default address of the service.""" + + # The name of the interface for this client. This is the key used to + # find the method configuration in the client_config dictionary. + _INTERFACE_NAME = "google.bigtable.v2.Bigtable" + + @classmethod + def from_service_account_file(cls, filename, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + BigtableClient: The constructed client. 
+ """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @classmethod + def table_path(cls, project, instance, table): + """Return a fully-qualified table string.""" + return google.api_core.path_template.expand( + "projects/{project}/instances/{instance}/tables/{table}", + project=project, + instance=instance, + table=table, + ) + + def __init__( + self, + transport=None, + channel=None, + credentials=None, + client_config=None, + client_info=None, + client_options=None, + ): + """Constructor. + + Args: + transport (Union[~.BigtableGrpcTransport, + Callable[[~.Credentials, type], ~.BigtableGrpcTransport]): A transport + instance, responsible for actually making the API calls. + The default transport uses the gRPC protocol. + This argument may also be a callable which returns a + transport instance. Callables will be sent the credentials + as the first argument and the default transport class as + the second argument. + channel (grpc.Channel): DEPRECATED. A ``Channel`` instance + through which to make calls. This argument is mutually exclusive + with ``credentials``; providing both will raise an exception. + credentials (google.auth.credentials.Credentials): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is mutually exclusive with providing a + transport instance to ``transport``; doing so will raise + an exception. + client_config (dict): DEPRECATED. A dictionary of call options for + each method. If not specified, the default configuration is used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. 
If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + client_options (Union[dict, google.api_core.client_options.ClientOptions]): + Client options used to set user options on the client. API Endpoint + should be set through client_options. + """ + # Raise deprecation warnings for things we want to go away. + if client_config is not None: + warnings.warn( + "The `client_config` argument is deprecated.", + PendingDeprecationWarning, + stacklevel=2, + ) + else: + client_config = bigtable_client_config.config + + if channel: + warnings.warn( + "The `channel` argument is deprecated; use " "`transport` instead.", + PendingDeprecationWarning, + stacklevel=2, + ) + + api_endpoint = self.SERVICE_ADDRESS + if client_options: + if type(client_options) == dict: + client_options = google.api_core.client_options.from_dict( + client_options + ) + if client_options.api_endpoint: + api_endpoint = client_options.api_endpoint + + # Instantiate the transport. + # The transport is responsible for handling serialization and + # deserialization and actually sending data to the service. + if transport: + if callable(transport): + self.transport = transport( + credentials=credentials, + default_class=bigtable_grpc_transport.BigtableGrpcTransport, + address=api_endpoint, + ) + else: + if credentials: + raise ValueError( + "Received both a transport instance and " + "credentials; these are mutually exclusive." 
+ ) + self.transport = transport + else: + self.transport = bigtable_grpc_transport.BigtableGrpcTransport( + address=api_endpoint, channel=channel, credentials=credentials, + ) + + if client_info is None: + client_info = google.api_core.gapic_v1.client_info.ClientInfo( + gapic_version=_GAPIC_LIBRARY_VERSION, + ) + else: + client_info.gapic_version = _GAPIC_LIBRARY_VERSION + self._client_info = client_info + + # Parse out the default settings for retry and timeout for each RPC + # from the client configuration. + # (Ordinarily, these are the defaults specified in the `*_config.py` + # file next to this one.) + self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( + client_config["interfaces"][self._INTERFACE_NAME], + ) + + # Save a dictionary of cached API call functions. + # These are the actual callables which invoke the proper + # transport methods, wrapped with `wrap_method` to add retry, + # timeout, and the like. + self._inner_api_calls = {} + + # Service calls + def read_rows( + self, + table_name, + app_profile_id=None, + rows=None, + filter_=None, + rows_limit=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Streams back the contents of all requested rows in key order, optionally + applying the same Reader filter to each. Depending on their size, + rows and cells may be broken up across multiple responses, but + atomicity of each row will still be preserved. See the + ReadRowsResponse documentation for details. + + Example: + >>> from google.cloud import bigtable_v2 + >>> + >>> client = bigtable_v2.BigtableClient() + >>> + >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + >>> + >>> for element in client.read_rows(table_name): + ... # process element + ... pass + + Args: + table_name (str): Required. The unique name of the table from which to read. Values + are of the form + ``projects//instances//tables/``. 
+ app_profile_id (str): This value specifies routing for replication. If not specified, the + "default" application profile will be used. + rows (Union[dict, ~google.cloud.bigtable_v2.types.RowSet]): The row keys and/or ranges to read. If not specified, reads from all rows. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_v2.types.RowSet` + filter_ (Union[dict, ~google.cloud.bigtable_v2.types.RowFilter]): The filter to apply to the contents of the specified row(s). If unset, + reads the entirety of each row. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_v2.types.RowFilter` + rows_limit (long): The read will terminate after committing to N rows' worth of results. The + default (zero) is to return all results. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + Iterable[~google.cloud.bigtable_v2.types.ReadRowsResponse]. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. 
+ if "read_rows" not in self._inner_api_calls: + self._inner_api_calls[ + "read_rows" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.read_rows, + default_retry=self._method_configs["ReadRows"].retry, + default_timeout=self._method_configs["ReadRows"].timeout, + client_info=self._client_info, + ) + + request = bigtable_pb2.ReadRowsRequest( + table_name=table_name, + app_profile_id=app_profile_id, + rows=rows, + filter=filter_, + rows_limit=rows_limit, + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("table_name", table_name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + return self._inner_api_calls["read_rows"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def sample_row_keys( + self, + table_name, + app_profile_id=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Returns a sample of row keys in the table. The returned row keys will + delimit contiguous sections of the table of approximately equal size, + which can be used to break up the data for distributed tasks like + mapreduces. + + Example: + >>> from google.cloud import bigtable_v2 + >>> + >>> client = bigtable_v2.BigtableClient() + >>> + >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + >>> + >>> for element in client.sample_row_keys(table_name): + ... # process element + ... pass + + Args: + table_name (str): Required. The unique name of the table from which to sample row + keys. Values are of the form + ``projects//instances//tables/
``. + app_profile_id (str): This value specifies routing for replication. If not specified, the + "default" application profile will be used. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + Iterable[~google.cloud.bigtable_v2.types.SampleRowKeysResponse]. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. 
+ if "sample_row_keys" not in self._inner_api_calls: + self._inner_api_calls[ + "sample_row_keys" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.sample_row_keys, + default_retry=self._method_configs["SampleRowKeys"].retry, + default_timeout=self._method_configs["SampleRowKeys"].timeout, + client_info=self._client_info, + ) + + request = bigtable_pb2.SampleRowKeysRequest( + table_name=table_name, app_profile_id=app_profile_id, + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("table_name", table_name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + return self._inner_api_calls["sample_row_keys"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def mutate_row( + self, + table_name, + row_key, + mutations, + app_profile_id=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Mutates a row atomically. Cells already present in the row are left + unchanged unless explicitly changed by ``mutation``. + + Example: + >>> from google.cloud import bigtable_v2 + >>> + >>> client = bigtable_v2.BigtableClient() + >>> + >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + >>> + >>> # TODO: Initialize `row_key`: + >>> row_key = b'' + >>> + >>> # TODO: Initialize `mutations`: + >>> mutations = [] + >>> + >>> response = client.mutate_row(table_name, row_key, mutations) + + Args: + table_name (str): Required. The unique name of the table to which the mutation should + be applied. Values are of the form + ``projects//instances//tables/
``. + row_key (bytes): Required. The key of the row to which the mutation should be applied. + mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Required. Changes to be atomically applied to the specified row. Entries are applied + in order, meaning that earlier mutations can be masked by later ones. + Must contain at least one entry and at most 100000. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_v2.types.Mutation` + app_profile_id (str): This value specifies routing for replication. If not specified, the + "default" application profile will be used. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_v2.types.MutateRowResponse` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. 
+ if "mutate_row" not in self._inner_api_calls: + self._inner_api_calls[ + "mutate_row" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.mutate_row, + default_retry=self._method_configs["MutateRow"].retry, + default_timeout=self._method_configs["MutateRow"].timeout, + client_info=self._client_info, + ) + + request = bigtable_pb2.MutateRowRequest( + table_name=table_name, + row_key=row_key, + mutations=mutations, + app_profile_id=app_profile_id, + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("table_name", table_name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + return self._inner_api_calls["mutate_row"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def mutate_rows( + self, + table_name, + entries, + app_profile_id=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Mutates multiple rows in a batch. Each individual row is mutated + atomically as in MutateRow, but the entire batch is not executed + atomically. + + Example: + >>> from google.cloud import bigtable_v2 + >>> + >>> client = bigtable_v2.BigtableClient() + >>> + >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + >>> + >>> # TODO: Initialize `entries`: + >>> entries = [] + >>> + >>> for element in client.mutate_rows(table_name, entries): + ... # process element + ... pass + + Args: + table_name (str): Required. The unique name of the table to which the mutations should be applied. + entries (list[Union[dict, ~google.cloud.bigtable_v2.types.Entry]]): Required. The row keys and corresponding mutations to be applied in bulk. + Each entry is applied as an atomic mutation, but the entries may be + applied in arbitrary order (even between entries for the same row). 
+ At least one entry must be specified, and in total the entries can + contain at most 100000 mutations. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_v2.types.Entry` + app_profile_id (str): This value specifies routing for replication. If not specified, the + "default" application profile will be used. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + Iterable[~google.cloud.bigtable_v2.types.MutateRowsResponse]. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. 
+ if "mutate_rows" not in self._inner_api_calls: + self._inner_api_calls[ + "mutate_rows" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.mutate_rows, + default_retry=self._method_configs["MutateRows"].retry, + default_timeout=self._method_configs["MutateRows"].timeout, + client_info=self._client_info, + ) + + request = bigtable_pb2.MutateRowsRequest( + table_name=table_name, entries=entries, app_profile_id=app_profile_id, + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("table_name", table_name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + return self._inner_api_calls["mutate_rows"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def check_and_mutate_row( + self, + table_name, + row_key, + app_profile_id=None, + predicate_filter=None, + true_mutations=None, + false_mutations=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Mutates a row atomically based on the output of a predicate Reader filter. + + Example: + >>> from google.cloud import bigtable_v2 + >>> + >>> client = bigtable_v2.BigtableClient() + >>> + >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + >>> + >>> # TODO: Initialize `row_key`: + >>> row_key = b'' + >>> + >>> response = client.check_and_mutate_row(table_name, row_key) + + Args: + table_name (str): Required. The unique name of the table to which the conditional + mutation should be applied. Values are of the form + ``projects//instances//tables/
``. + row_key (bytes): Required. The key of the row to which the conditional mutation should be applied. + app_profile_id (str): This value specifies routing for replication. If not specified, the + "default" application profile will be used. + predicate_filter (Union[dict, ~google.cloud.bigtable_v2.types.RowFilter]): The filter to be applied to the contents of the specified row. + Depending on whether or not any results are yielded, either + ``true_mutations`` or ``false_mutations`` will be executed. If unset, + checks that the row contains any values at all. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_v2.types.RowFilter` + true_mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row if + ``predicate_filter`` yields at least one cell when applied to + ``row_key``. Entries are applied in order, meaning that earlier + mutations can be masked by later ones. Must contain at least one entry + if ``false_mutations`` is empty, and at most 100000. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_v2.types.Mutation` + false_mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row if + ``predicate_filter`` does not yield any cells when applied to + ``row_key``. Entries are applied in order, meaning that earlier + mutations can be masked by later ones. Must contain at least one entry + if ``true_mutations`` is empty, and at most 100000. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_v2.types.Mutation` + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. 
+ timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_v2.types.CheckAndMutateRowResponse` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. + if "check_and_mutate_row" not in self._inner_api_calls: + self._inner_api_calls[ + "check_and_mutate_row" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.check_and_mutate_row, + default_retry=self._method_configs["CheckAndMutateRow"].retry, + default_timeout=self._method_configs["CheckAndMutateRow"].timeout, + client_info=self._client_info, + ) + + request = bigtable_pb2.CheckAndMutateRowRequest( + table_name=table_name, + row_key=row_key, + app_profile_id=app_profile_id, + predicate_filter=predicate_filter, + true_mutations=true_mutations, + false_mutations=false_mutations, + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("table_name", table_name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + return self._inner_api_calls["check_and_mutate_row"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def read_modify_write_row( + self, + table_name, + row_key, + rules, + app_profile_id=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Modifies a row atomically on 
the server. The method reads the latest + existing timestamp and value from the specified columns and writes a new + entry based on pre-defined read/modify/write rules. The new value for the + timestamp is the greater of the existing timestamp or the current server + time. The method returns the new contents of all modified cells. + + Example: + >>> from google.cloud import bigtable_v2 + >>> + >>> client = bigtable_v2.BigtableClient() + >>> + >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + >>> + >>> # TODO: Initialize `row_key`: + >>> row_key = b'' + >>> + >>> # TODO: Initialize `rules`: + >>> rules = [] + >>> + >>> response = client.read_modify_write_row(table_name, row_key, rules) + + Args: + table_name (str): Required. The unique name of the table to which the + read/modify/write rules should be applied. Values are of the form + ``projects//instances//tables/
``. + row_key (bytes): Required. The key of the row to which the read/modify/write rules should be applied. + rules (list[Union[dict, ~google.cloud.bigtable_v2.types.ReadModifyWriteRule]]): Required. Rules specifying how the specified row's contents are to be transformed + into writes. Entries are applied in order, meaning that earlier rules will + affect the results of later ones. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_v2.types.ReadModifyWriteRule` + app_profile_id (str): This value specifies routing for replication. If not specified, the + "default" application profile will be used. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_v2.types.ReadModifyWriteRowResponse` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. 
+ if "read_modify_write_row" not in self._inner_api_calls: + self._inner_api_calls[ + "read_modify_write_row" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.read_modify_write_row, + default_retry=self._method_configs["ReadModifyWriteRow"].retry, + default_timeout=self._method_configs["ReadModifyWriteRow"].timeout, + client_info=self._client_info, + ) + + request = bigtable_pb2.ReadModifyWriteRowRequest( + table_name=table_name, + row_key=row_key, + rules=rules, + app_profile_id=app_profile_id, + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("table_name", table_name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + return self._inner_api_calls["read_modify_write_row"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) diff --git a/google/cloud/bigtable_v2/gapic/bigtable_client_config.py b/google/cloud/bigtable_v2/gapic/bigtable_client_config.py new file mode 100644 index 000000000..8a57847bf --- /dev/null +++ b/google/cloud/bigtable_v2/gapic/bigtable_client_config.py @@ -0,0 +1,80 @@ +config = { + "interfaces": { + "google.bigtable.v2.Bigtable": { + "retry_codes": { + "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], + "non_idempotent": [], + }, + "retry_params": { + "idempotent_params": { + "initial_retry_delay_millis": 10, + "retry_delay_multiplier": 2.0, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 20000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 20000, + "total_timeout_millis": 600000, + }, + "non_idempotent_params": { + "initial_retry_delay_millis": 10, + "retry_delay_multiplier": 2.0, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 20000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 20000, + "total_timeout_millis": 20000, + }, + "read_rows_params": { + 
"initial_retry_delay_millis": 10, + "retry_delay_multiplier": 2.0, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 300000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 300000, + "total_timeout_millis": 43200000, + }, + "mutate_rows_params": { + "initial_retry_delay_millis": 10, + "retry_delay_multiplier": 2.0, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 60000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 60000, + "total_timeout_millis": 600000, + }, + }, + "methods": { + "ReadRows": { + "timeout_millis": 43200000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "read_rows_params", + }, + "SampleRowKeys": { + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "non_idempotent_params", + }, + "MutateRow": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "idempotent_params", + }, + "MutateRows": { + "timeout_millis": 600000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "mutate_rows_params", + }, + "CheckAndMutateRow": { + "timeout_millis": 20000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "non_idempotent_params", + }, + "ReadModifyWriteRow": { + "timeout_millis": 20000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "non_idempotent_params", + }, + }, + } + } +} diff --git a/google/cloud/bigtable_v2/gapic/transports/__init__.py b/google/cloud/bigtable_v2/gapic/transports/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py b/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py new file mode 100644 index 000000000..5b2757db2 --- /dev/null +++ b/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py @@ -0,0 +1,207 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the 
"License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import google.api_core.grpc_helpers + +from google.cloud.bigtable_v2.proto import bigtable_pb2_grpc + + +class BigtableGrpcTransport(object): + """gRPC transport class providing stubs for + google.bigtable.v2 Bigtable API. + + The transport provides access to the raw gRPC stubs, + which can be used to take advantage of advanced + features of gRPC. + """ + + # The scopes needed to make gRPC calls to all of the methods defined + # in this service. + _OAUTH_SCOPES = ( + "https://www.googleapis.com/auth/bigtable.data", + "https://www.googleapis.com/auth/bigtable.data.readonly", + "https://www.googleapis.com/auth/cloud-bigtable.data", + "https://www.googleapis.com/auth/cloud-bigtable.data.readonly", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ) + + def __init__( + self, channel=None, credentials=None, address="bigtable.googleapis.com:443" + ): + """Instantiate the transport class. + + Args: + channel (grpc.Channel): A ``Channel`` instance through + which to make calls. This argument is mutually exclusive + with ``credentials``; providing both will raise an exception. + credentials (google.auth.credentials.Credentials): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If none + are specified, the client will attempt to ascertain the + credentials from the environment. + address (str): The address where the service is hosted. 
+ """ + # If both `channel` and `credentials` are specified, raise an + # exception (channels come with credentials baked in already). + if channel is not None and credentials is not None: + raise ValueError( + "The `channel` and `credentials` arguments are mutually " "exclusive.", + ) + + # Create the channel. + if channel is None: + channel = self.create_channel( + address=address, + credentials=credentials, + options={ + "grpc.max_send_message_length": -1, + "grpc.max_receive_message_length": -1, + }.items(), + ) + + self._channel = channel + + # gRPC uses objects called "stubs" that are bound to the + # channel and provide a basic method for each RPC. + self._stubs = { + "bigtable_stub": bigtable_pb2_grpc.BigtableStub(channel), + } + + @classmethod + def create_channel( + cls, address="bigtable.googleapis.com:443", credentials=None, **kwargs + ): + """Create and return a gRPC channel object. + + Args: + address (str): The host for the channel to use. + credentials (~.Credentials): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + kwargs (dict): Keyword arguments, which are passed to the + channel creation. + + Returns: + grpc.Channel: A gRPC channel object. + """ + return google.api_core.grpc_helpers.create_channel( + address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs + ) + + @property + def channel(self): + """The gRPC channel used by the transport. + + Returns: + grpc.Channel: A gRPC channel object. + """ + return self._channel + + @property + def read_rows(self): + """Return the gRPC stub for :meth:`BigtableClient.read_rows`. + + Streams back the contents of all requested rows in key order, optionally + applying the same Reader filter to each. 
Depending on their size, + rows and cells may be broken up across multiple responses, but + atomicity of each row will still be preserved. See the + ReadRowsResponse documentation for details. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_stub"].ReadRows + + @property + def sample_row_keys(self): + """Return the gRPC stub for :meth:`BigtableClient.sample_row_keys`. + + Returns a sample of row keys in the table. The returned row keys will + delimit contiguous sections of the table of approximately equal size, + which can be used to break up the data for distributed tasks like + mapreduces. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_stub"].SampleRowKeys + + @property + def mutate_row(self): + """Return the gRPC stub for :meth:`BigtableClient.mutate_row`. + + Mutates a row atomically. Cells already present in the row are left + unchanged unless explicitly changed by ``mutation``. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_stub"].MutateRow + + @property + def mutate_rows(self): + """Return the gRPC stub for :meth:`BigtableClient.mutate_rows`. + + Mutates multiple rows in a batch. Each individual row is mutated + atomically as in MutateRow, but the entire batch is not executed + atomically. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_stub"].MutateRows + + @property + def check_and_mutate_row(self): + """Return the gRPC stub for :meth:`BigtableClient.check_and_mutate_row`. 
+ + Mutates a row atomically based on the output of a predicate Reader filter. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_stub"].CheckAndMutateRow + + @property + def read_modify_write_row(self): + """Return the gRPC stub for :meth:`BigtableClient.read_modify_write_row`. + + Modifies a row atomically on the server. The method reads the latest + existing timestamp and value from the specified columns and writes a new + entry based on pre-defined read/modify/write rules. The new value for the + timestamp is the greater of the existing timestamp or the current server + time. The method returns the new contents of all modified cells. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_stub"].ReadModifyWriteRow diff --git a/google/cloud/bigtable_v2/proto/__init__.py b/google/cloud/bigtable_v2/proto/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/google/cloud/bigtable_v2/proto/bigtable_pb2.py b/google/cloud/bigtable_v2/proto/bigtable_pb2.py new file mode 100644 index 000000000..ba711b20c --- /dev/null +++ b/google/cloud/bigtable_v2/proto/bigtable_pb2.py @@ -0,0 +1,1798 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/cloud/bigtable_v2/proto/bigtable.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database + +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 +from google.api import client_pb2 as google_dot_api_dot_client__pb2 +from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 +from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 +from google.cloud.bigtable_v2.proto import ( + data_pb2 as google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2, +) +from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2 +from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name="google/cloud/bigtable_v2/proto/bigtable.proto", + package="google.bigtable.v2", + syntax="proto3", + serialized_options=b"\n\026com.google.bigtable.v2B\rBigtableProtoP\001Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2\352\002\033Google::Cloud::Bigtable::V2\352AW\n\035bigtable.googleapis.com/Table\0226projects/{project}/instances/{instance}/tables/{table}", + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n-google/cloud/bigtable_v2/proto/bigtable.proto\x12\x12google.bigtable.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a)google/cloud/bigtable_v2/proto/data.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x17google/rpc/status.proto"\xd1\x01\n\x0fReadRowsRequest\x12\x39\n\ntable_name\x18\x01 
\x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x05 \x01(\t\x12(\n\x04rows\x18\x02 \x01(\x0b\x32\x1a.google.bigtable.v2.RowSet\x12-\n\x06\x66ilter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x12\n\nrows_limit\x18\x04 \x01(\x03"\xf8\x02\n\x10ReadRowsResponse\x12>\n\x06\x63hunks\x18\x01 \x03(\x0b\x32..google.bigtable.v2.ReadRowsResponse.CellChunk\x12\x1c\n\x14last_scanned_row_key\x18\x02 \x01(\x0c\x1a\x85\x02\n\tCellChunk\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x31\n\x0b\x66\x61mily_name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\tqualifier\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.BytesValue\x12\x18\n\x10timestamp_micros\x18\x04 \x01(\x03\x12\x0e\n\x06labels\x18\x05 \x03(\t\x12\r\n\x05value\x18\x06 \x01(\x0c\x12\x12\n\nvalue_size\x18\x07 \x01(\x05\x12\x13\n\treset_row\x18\x08 \x01(\x08H\x00\x12\x14\n\ncommit_row\x18\t \x01(\x08H\x00\x42\x0c\n\nrow_status"i\n\x14SampleRowKeysRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t">\n\x15SampleRowKeysResponse\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x14\n\x0coffset_bytes\x18\x02 \x01(\x03"\xb1\x01\n\x10MutateRowRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12\x34\n\tmutations\x18\x03 \x03(\x0b\x32\x1c.google.bigtable.v2.MutationB\x03\xe0\x41\x02"\x13\n\x11MutateRowResponse"\xf9\x01\n\x11MutateRowsRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x03 \x01(\t\x12\x41\n\x07\x65ntries\x18\x02 \x03(\x0b\x32+.google.bigtable.v2.MutateRowsRequest.EntryB\x03\xe0\x41\x02\x1aN\n\x05\x45ntry\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x34\n\tmutations\x18\x02 
\x03(\x0b\x32\x1c.google.bigtable.v2.MutationB\x03\xe0\x41\x02"\x8f\x01\n\x12MutateRowsResponse\x12=\n\x07\x65ntries\x18\x01 \x03(\x0b\x32,.google.bigtable.v2.MutateRowsResponse.Entry\x1a:\n\x05\x45ntry\x12\r\n\x05index\x18\x01 \x01(\x03\x12"\n\x06status\x18\x02 \x01(\x0b\x32\x12.google.rpc.Status"\xa9\x02\n\x18\x43heckAndMutateRowRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x07 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12\x37\n\x10predicate_filter\x18\x06 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x34\n\x0etrue_mutations\x18\x04 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\x12\x35\n\x0f\x66\x61lse_mutations\x18\x05 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation"6\n\x19\x43heckAndMutateRowResponse\x12\x19\n\x11predicate_matched\x18\x01 \x01(\x08"\xc1\x01\n\x19ReadModifyWriteRowRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12;\n\x05rules\x18\x03 \x03(\x0b\x32\'.google.bigtable.v2.ReadModifyWriteRuleB\x03\xe0\x41\x02"B\n\x1aReadModifyWriteRowResponse\x12$\n\x03row\x18\x01 
\x01(\x0b\x32\x17.google.bigtable.v2.Row2\xc4\x0e\n\x08\x42igtable\x12\xc6\x01\n\x08ReadRows\x12#.google.bigtable.v2.ReadRowsRequest\x1a$.google.bigtable.v2.ReadRowsResponse"m\x82\xd3\xe4\x93\x02>"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\x01*\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id0\x01\x12\xd7\x01\n\rSampleRowKeys\x12(.google.bigtable.v2.SampleRowKeysRequest\x1a).google.bigtable.v2.SampleRowKeysResponse"o\x82\xd3\xe4\x93\x02@\x12>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id0\x01\x12\xed\x01\n\tMutateRow\x12$.google.bigtable.v2.MutateRowRequest\x1a%.google.bigtable.v2.MutateRowResponse"\x92\x01\x82\xd3\xe4\x93\x02?":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\x01*\xda\x41\x1ctable_name,row_key,mutations\xda\x41+table_name,row_key,mutations,app_profile_id\x12\xde\x01\n\nMutateRows\x12%.google.bigtable.v2.MutateRowsRequest\x1a&.google.bigtable.v2.MutateRowsResponse"\x7f\x82\xd3\xe4\x93\x02@";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\x01*\xda\x41\x12table_name,entries\xda\x41!table_name,entries,app_profile_id0\x01\x12\xd9\x02\n\x11\x43heckAndMutateRow\x12,.google.bigtable.v2.CheckAndMutateRowRequest\x1a-.google.bigtable.v2.CheckAndMutateRowResponse"\xe6\x01\x82\xd3\xe4\x93\x02G"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\x01*\xda\x41\x42table_name,row_key,predicate_filter,true_mutations,false_mutations\xda\x41Qtable_name,row_key,predicate_filter,true_mutations,false_mutations,app_profile_id\x12\x89\x02\n\x12ReadModifyWriteRow\x12-.google.bigtable.v2.ReadModifyWriteRowRequest\x1a..google.bigtable.v2.ReadModifyWriteRowResponse"\x93\x01\x82\xd3\xe4\x93\x02H"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\x01*\xda\x41\x18table_name,row_key,rules\xda\x41\'table_name,row_key,rules,app_profile_id\x1a\xdb\x02\xca\x41\x17\x62igtable.googleapis.com\xd2\x41\xbd\x02https://www.googleapis.com
/auth/bigtable.data,https://www.googleapis.com/auth/bigtable.data.readonly,https://www.googleapis.com/auth/cloud-bigtable.data,https://www.googleapis.com/auth/cloud-bigtable.data.readonly,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-onlyB\x93\x02\n\x16\x63om.google.bigtable.v2B\rBigtableProtoP\x01Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2\xea\x02\x1bGoogle::Cloud::Bigtable::V2\xea\x41W\n\x1d\x62igtable.googleapis.com/Table\x12\x36projects/{project}/instances/{instance}/tables/{table}b\x06proto3', + dependencies=[ + google_dot_api_dot_annotations__pb2.DESCRIPTOR, + google_dot_api_dot_client__pb2.DESCRIPTOR, + google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, + google_dot_api_dot_resource__pb2.DESCRIPTOR, + google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2.DESCRIPTOR, + google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR, + google_dot_rpc_dot_status__pb2.DESCRIPTOR, + ], +) + + +_READROWSREQUEST = _descriptor.Descriptor( + name="ReadRowsRequest", + full_name="google.bigtable.v2.ReadRowsRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="table_name", + full_name="google.bigtable.v2.ReadRowsRequest.table_name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="app_profile_id", + full_name="google.bigtable.v2.ReadRowsRequest.app_profile_id", + index=1, + number=5, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + 
default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="rows", + full_name="google.bigtable.v2.ReadRowsRequest.rows", + index=2, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="filter", + full_name="google.bigtable.v2.ReadRowsRequest.filter", + index=3, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="rows_limit", + full_name="google.bigtable.v2.ReadRowsRequest.rows_limit", + index=4, + number=4, + type=3, + cpp_type=2, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=285, + serialized_end=494, +) + + +_READROWSRESPONSE_CELLCHUNK = _descriptor.Descriptor( + name="CellChunk", + full_name="google.bigtable.v2.ReadRowsResponse.CellChunk", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + 
_descriptor.FieldDescriptor( + name="row_key", + full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.row_key", + index=0, + number=1, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"", + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="family_name", + full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.family_name", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="qualifier", + full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.qualifier", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="timestamp_micros", + full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.timestamp_micros", + index=3, + number=4, + type=3, + cpp_type=2, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="labels", + full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.labels", + index=4, + number=5, + type=9, + cpp_type=9, + label=3, + has_default_value=False, + default_value=[], + message_type=None, 
+ enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="value", + full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.value", + index=5, + number=6, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"", + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="value_size", + full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.value_size", + index=6, + number=7, + type=5, + cpp_type=1, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="reset_row", + full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.reset_row", + index=7, + number=8, + type=8, + cpp_type=7, + label=1, + has_default_value=False, + default_value=False, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="commit_row", + full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.commit_row", + index=8, + number=9, + type=8, + cpp_type=7, + label=1, + has_default_value=False, + default_value=False, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + 
serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name="row_status", + full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.row_status", + index=0, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[], + ), + ], + serialized_start=612, + serialized_end=873, +) + +_READROWSRESPONSE = _descriptor.Descriptor( + name="ReadRowsResponse", + full_name="google.bigtable.v2.ReadRowsResponse", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="chunks", + full_name="google.bigtable.v2.ReadRowsResponse.chunks", + index=0, + number=1, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="last_scanned_row_key", + full_name="google.bigtable.v2.ReadRowsResponse.last_scanned_row_key", + index=1, + number=2, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"", + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[_READROWSRESPONSE_CELLCHUNK,], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=497, + serialized_end=873, +) + + +_SAMPLEROWKEYSREQUEST = _descriptor.Descriptor( + name="SampleRowKeysRequest", + full_name="google.bigtable.v2.SampleRowKeysRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + 
_descriptor.FieldDescriptor( + name="table_name", + full_name="google.bigtable.v2.SampleRowKeysRequest.table_name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="app_profile_id", + full_name="google.bigtable.v2.SampleRowKeysRequest.app_profile_id", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=875, + serialized_end=980, +) + + +_SAMPLEROWKEYSRESPONSE = _descriptor.Descriptor( + name="SampleRowKeysResponse", + full_name="google.bigtable.v2.SampleRowKeysResponse", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="row_key", + full_name="google.bigtable.v2.SampleRowKeysResponse.row_key", + index=0, + number=1, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"", + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="offset_bytes", + full_name="google.bigtable.v2.SampleRowKeysResponse.offset_bytes", + index=1, + number=2, + 
type=3, + cpp_type=2, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=982, + serialized_end=1044, +) + + +_MUTATEROWREQUEST = _descriptor.Descriptor( + name="MutateRowRequest", + full_name="google.bigtable.v2.MutateRowRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="table_name", + full_name="google.bigtable.v2.MutateRowRequest.table_name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="app_profile_id", + full_name="google.bigtable.v2.MutateRowRequest.app_profile_id", + index=1, + number=4, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="row_key", + full_name="google.bigtable.v2.MutateRowRequest.row_key", + index=2, + number=2, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"", + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + 
extension_scope=None, + serialized_options=b"\340A\002", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="mutations", + full_name="google.bigtable.v2.MutateRowRequest.mutations", + index=3, + number=3, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1047, + serialized_end=1224, +) + + +_MUTATEROWRESPONSE = _descriptor.Descriptor( + name="MutateRowResponse", + full_name="google.bigtable.v2.MutateRowResponse", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1226, + serialized_end=1245, +) + + +_MUTATEROWSREQUEST_ENTRY = _descriptor.Descriptor( + name="Entry", + full_name="google.bigtable.v2.MutateRowsRequest.Entry", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="row_key", + full_name="google.bigtable.v2.MutateRowsRequest.Entry.row_key", + index=0, + number=1, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"", + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="mutations", + 
full_name="google.bigtable.v2.MutateRowsRequest.Entry.mutations", + index=1, + number=2, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1419, + serialized_end=1497, +) + +_MUTATEROWSREQUEST = _descriptor.Descriptor( + name="MutateRowsRequest", + full_name="google.bigtable.v2.MutateRowsRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="table_name", + full_name="google.bigtable.v2.MutateRowsRequest.table_name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="app_profile_id", + full_name="google.bigtable.v2.MutateRowsRequest.app_profile_id", + index=1, + number=3, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="entries", + full_name="google.bigtable.v2.MutateRowsRequest.entries", + index=2, + number=2, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + 
default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[_MUTATEROWSREQUEST_ENTRY,], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1248, + serialized_end=1497, +) + + +_MUTATEROWSRESPONSE_ENTRY = _descriptor.Descriptor( + name="Entry", + full_name="google.bigtable.v2.MutateRowsResponse.Entry", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="index", + full_name="google.bigtable.v2.MutateRowsResponse.Entry.index", + index=0, + number=1, + type=3, + cpp_type=2, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="status", + full_name="google.bigtable.v2.MutateRowsResponse.Entry.status", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1585, + serialized_end=1643, +) + +_MUTATEROWSRESPONSE = _descriptor.Descriptor( + name="MutateRowsResponse", + full_name="google.bigtable.v2.MutateRowsResponse", + filename=None, + file=DESCRIPTOR, + containing_type=None, + 
create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="entries", + full_name="google.bigtable.v2.MutateRowsResponse.entries", + index=0, + number=1, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[_MUTATEROWSRESPONSE_ENTRY,], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1500, + serialized_end=1643, +) + + +_CHECKANDMUTATEROWREQUEST = _descriptor.Descriptor( + name="CheckAndMutateRowRequest", + full_name="google.bigtable.v2.CheckAndMutateRowRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="table_name", + full_name="google.bigtable.v2.CheckAndMutateRowRequest.table_name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="app_profile_id", + full_name="google.bigtable.v2.CheckAndMutateRowRequest.app_profile_id", + index=1, + number=7, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="row_key", 
+ full_name="google.bigtable.v2.CheckAndMutateRowRequest.row_key", + index=2, + number=2, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"", + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="predicate_filter", + full_name="google.bigtable.v2.CheckAndMutateRowRequest.predicate_filter", + index=3, + number=6, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="true_mutations", + full_name="google.bigtable.v2.CheckAndMutateRowRequest.true_mutations", + index=4, + number=4, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="false_mutations", + full_name="google.bigtable.v2.CheckAndMutateRowRequest.false_mutations", + index=5, + number=5, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1646, + serialized_end=1943, +) + + +_CHECKANDMUTATEROWRESPONSE = 
_descriptor.Descriptor( + name="CheckAndMutateRowResponse", + full_name="google.bigtable.v2.CheckAndMutateRowResponse", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="predicate_matched", + full_name="google.bigtable.v2.CheckAndMutateRowResponse.predicate_matched", + index=0, + number=1, + type=8, + cpp_type=7, + label=1, + has_default_value=False, + default_value=False, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1945, + serialized_end=1999, +) + + +_READMODIFYWRITEROWREQUEST = _descriptor.Descriptor( + name="ReadModifyWriteRowRequest", + full_name="google.bigtable.v2.ReadModifyWriteRowRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="table_name", + full_name="google.bigtable.v2.ReadModifyWriteRowRequest.table_name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="app_profile_id", + full_name="google.bigtable.v2.ReadModifyWriteRowRequest.app_profile_id", + index=1, + number=4, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + 
is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="row_key", + full_name="google.bigtable.v2.ReadModifyWriteRowRequest.row_key", + index=2, + number=2, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"", + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="rules", + full_name="google.bigtable.v2.ReadModifyWriteRowRequest.rules", + index=3, + number=3, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=2002, + serialized_end=2195, +) + + +_READMODIFYWRITEROWRESPONSE = _descriptor.Descriptor( + name="ReadModifyWriteRowResponse", + full_name="google.bigtable.v2.ReadModifyWriteRowResponse", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="row", + full_name="google.bigtable.v2.ReadModifyWriteRowResponse.row", + index=0, + number=1, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + 
serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=2197, + serialized_end=2263, +) + +_READROWSREQUEST.fields_by_name[ + "rows" +].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._ROWSET +_READROWSREQUEST.fields_by_name[ + "filter" +].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._ROWFILTER +_READROWSRESPONSE_CELLCHUNK.fields_by_name[ + "family_name" +].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE +_READROWSRESPONSE_CELLCHUNK.fields_by_name[ + "qualifier" +].message_type = google_dot_protobuf_dot_wrappers__pb2._BYTESVALUE +_READROWSRESPONSE_CELLCHUNK.containing_type = _READROWSRESPONSE +_READROWSRESPONSE_CELLCHUNK.oneofs_by_name["row_status"].fields.append( + _READROWSRESPONSE_CELLCHUNK.fields_by_name["reset_row"] +) +_READROWSRESPONSE_CELLCHUNK.fields_by_name[ + "reset_row" +].containing_oneof = _READROWSRESPONSE_CELLCHUNK.oneofs_by_name["row_status"] +_READROWSRESPONSE_CELLCHUNK.oneofs_by_name["row_status"].fields.append( + _READROWSRESPONSE_CELLCHUNK.fields_by_name["commit_row"] +) +_READROWSRESPONSE_CELLCHUNK.fields_by_name[ + "commit_row" +].containing_oneof = _READROWSRESPONSE_CELLCHUNK.oneofs_by_name["row_status"] +_READROWSRESPONSE.fields_by_name["chunks"].message_type = _READROWSRESPONSE_CELLCHUNK +_MUTATEROWREQUEST.fields_by_name[ + "mutations" +].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._MUTATION +_MUTATEROWSREQUEST_ENTRY.fields_by_name[ + "mutations" +].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._MUTATION +_MUTATEROWSREQUEST_ENTRY.containing_type = _MUTATEROWSREQUEST +_MUTATEROWSREQUEST.fields_by_name["entries"].message_type = _MUTATEROWSREQUEST_ENTRY +_MUTATEROWSRESPONSE_ENTRY.fields_by_name[ + "status" +].message_type = google_dot_rpc_dot_status__pb2._STATUS +_MUTATEROWSRESPONSE_ENTRY.containing_type = _MUTATEROWSRESPONSE 
+_MUTATEROWSRESPONSE.fields_by_name["entries"].message_type = _MUTATEROWSRESPONSE_ENTRY +_CHECKANDMUTATEROWREQUEST.fields_by_name[ + "predicate_filter" +].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._ROWFILTER +_CHECKANDMUTATEROWREQUEST.fields_by_name[ + "true_mutations" +].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._MUTATION +_CHECKANDMUTATEROWREQUEST.fields_by_name[ + "false_mutations" +].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._MUTATION +_READMODIFYWRITEROWREQUEST.fields_by_name[ + "rules" +].message_type = ( + google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._READMODIFYWRITERULE +) +_READMODIFYWRITEROWRESPONSE.fields_by_name[ + "row" +].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._ROW +DESCRIPTOR.message_types_by_name["ReadRowsRequest"] = _READROWSREQUEST +DESCRIPTOR.message_types_by_name["ReadRowsResponse"] = _READROWSRESPONSE +DESCRIPTOR.message_types_by_name["SampleRowKeysRequest"] = _SAMPLEROWKEYSREQUEST +DESCRIPTOR.message_types_by_name["SampleRowKeysResponse"] = _SAMPLEROWKEYSRESPONSE +DESCRIPTOR.message_types_by_name["MutateRowRequest"] = _MUTATEROWREQUEST +DESCRIPTOR.message_types_by_name["MutateRowResponse"] = _MUTATEROWRESPONSE +DESCRIPTOR.message_types_by_name["MutateRowsRequest"] = _MUTATEROWSREQUEST +DESCRIPTOR.message_types_by_name["MutateRowsResponse"] = _MUTATEROWSRESPONSE +DESCRIPTOR.message_types_by_name["CheckAndMutateRowRequest"] = _CHECKANDMUTATEROWREQUEST +DESCRIPTOR.message_types_by_name[ + "CheckAndMutateRowResponse" +] = _CHECKANDMUTATEROWRESPONSE +DESCRIPTOR.message_types_by_name[ + "ReadModifyWriteRowRequest" +] = _READMODIFYWRITEROWREQUEST +DESCRIPTOR.message_types_by_name[ + "ReadModifyWriteRowResponse" +] = _READMODIFYWRITEROWRESPONSE +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +ReadRowsRequest = _reflection.GeneratedProtocolMessageType( + "ReadRowsRequest", + (_message.Message,), + { + "DESCRIPTOR": 
_READROWSREQUEST, + "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", + "__doc__": """Request message for Bigtable.ReadRows. + + Attributes: + table_name: + Required. The unique name of the table from which to read. + Values are of the form + ``projects//instances//tables/
``. + app_profile_id: + This value specifies routing for replication. If not + specified, the “default” application profile will be used. + rows: + The row keys and/or ranges to read. If not specified, reads + from all rows. + filter: + The filter to apply to the contents of the specified row(s). + If unset, reads the entirety of each row. + rows_limit: + The read will terminate after committing to N rows’ worth of + results. The default (zero) is to return all results. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadRowsRequest) + }, +) +_sym_db.RegisterMessage(ReadRowsRequest) + +ReadRowsResponse = _reflection.GeneratedProtocolMessageType( + "ReadRowsResponse", + (_message.Message,), + { + "CellChunk": _reflection.GeneratedProtocolMessageType( + "CellChunk", + (_message.Message,), + { + "DESCRIPTOR": _READROWSRESPONSE_CELLCHUNK, + "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", + "__doc__": """Specifies a piece of a row’s contents returned as part of the read + response stream. + + Attributes: + row_key: + The row key for this chunk of data. If the row key is empty, + this CellChunk is a continuation of the same row as the + previous CellChunk in the response stream, even if that + CellChunk was in a previous ReadRowsResponse message. + family_name: + The column family name for this chunk of data. If this message + is not present this CellChunk is a continuation of the same + column family as the previous CellChunk. The empty string can + occur as a column family name in a response so clients must + check explicitly for the presence of this message, not just + for ``family_name.value`` being non-empty. + qualifier: + The column qualifier for this chunk of data. If this message + is not present, this CellChunk is a continuation of the same + column as the previous CellChunk. Column qualifiers may be + empty so clients must check for the presence of this message, + not just for ``qualifier.value`` being non-empty. 
+ timestamp_micros: + The cell’s stored timestamp, which also uniquely identifies it + within its column. Values are always expressed in + microseconds, but individual tables may set a coarser + granularity to further restrict the allowed values. For + example, a table which specifies millisecond granularity will + only allow values of ``timestamp_micros`` which are multiples + of 1000. Timestamps are only set in the first CellChunk per + cell (for cells split into multiple chunks). + labels: + Labels applied to the cell by a + [RowFilter][google.bigtable.v2.RowFilter]. Labels are only set + on the first CellChunk per cell. + value: + The value stored in the cell. Cell values can be split across + multiple CellChunks. In that case only the value field will be + set in CellChunks after the first: the timestamp and labels + will only be present in the first CellChunk, even if the first + CellChunk came in a previous ReadRowsResponse. + value_size: + If this CellChunk is part of a chunked cell value and this is + not the final chunk of that cell, value_size will be set to + the total length of the cell value. The client can use this + size to pre-allocate memory to hold the full cell value. + row_status: + Signals to the client concerning previous CellChunks received. + reset_row: + Indicates that the client should drop all previous chunks for + ``row_key``, as it will be re-read from the beginning. + commit_row: + Indicates that the client can safely process all previous + chunks for ``row_key``, as its data has been fully read. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadRowsResponse.CellChunk) + }, + ), + "DESCRIPTOR": _READROWSRESPONSE, + "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", + "__doc__": """Response message for Bigtable.ReadRows. + + Attributes: + chunks: + A collection of a row’s contents as part of the read request. 
+ last_scanned_row_key: + Optionally the server might return the row key of the last row + it has scanned. The client can use this to construct a more + efficient retry request if needed: any row keys or portions of + ranges less than this row key can be dropped from the request. + This is primarily useful for cases where the server has read a + lot of data that was filtered out since the last committed row + key, allowing the client to skip that work on a retry. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadRowsResponse) + }, +) +_sym_db.RegisterMessage(ReadRowsResponse) +_sym_db.RegisterMessage(ReadRowsResponse.CellChunk) + +SampleRowKeysRequest = _reflection.GeneratedProtocolMessageType( + "SampleRowKeysRequest", + (_message.Message,), + { + "DESCRIPTOR": _SAMPLEROWKEYSREQUEST, + "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", + "__doc__": """Request message for Bigtable.SampleRowKeys. + + Attributes: + table_name: + Required. The unique name of the table from which to sample + row keys. Values are of the form + ``projects//instances//tables/
``. + app_profile_id: + This value specifies routing for replication. If not + specified, the “default” application profile will be used. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.v2.SampleRowKeysRequest) + }, +) +_sym_db.RegisterMessage(SampleRowKeysRequest) + +SampleRowKeysResponse = _reflection.GeneratedProtocolMessageType( + "SampleRowKeysResponse", + (_message.Message,), + { + "DESCRIPTOR": _SAMPLEROWKEYSRESPONSE, + "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", + "__doc__": """Response message for Bigtable.SampleRowKeys. + + Attributes: + row_key: + Sorted streamed sequence of sample row keys in the table. The + table might have contents before the first row key in the list + and after the last one, but a key containing the empty string + indicates “end of table” and will be the last response given, + if present. Note that row keys in this list may not have ever + been written to or read from, and users should therefore not + make any assumptions about the row key structure that are + specific to their use case. + offset_bytes: + Approximate total storage space used by all rows in the table + which precede ``row_key``. Buffering the contents of all rows + between two subsequent samples would require space roughly + equal to the difference in their ``offset_bytes`` fields. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.v2.SampleRowKeysResponse) + }, +) +_sym_db.RegisterMessage(SampleRowKeysResponse) + +MutateRowRequest = _reflection.GeneratedProtocolMessageType( + "MutateRowRequest", + (_message.Message,), + { + "DESCRIPTOR": _MUTATEROWREQUEST, + "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", + "__doc__": """Request message for Bigtable.MutateRow. + + Attributes: + table_name: + Required. The unique name of the table to which the mutation + should be applied. Values are of the form + ``projects//instances//tables/
``. + app_profile_id: + This value specifies routing for replication. If not + specified, the “default” application profile will be used. + row_key: + Required. The key of the row to which the mutation should be + applied. + mutations: + Required. Changes to be atomically applied to the specified + row. Entries are applied in order, meaning that earlier + mutations can be masked by later ones. Must contain at least + one entry and at most 100000. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowRequest) + }, +) +_sym_db.RegisterMessage(MutateRowRequest) + +MutateRowResponse = _reflection.GeneratedProtocolMessageType( + "MutateRowResponse", + (_message.Message,), + { + "DESCRIPTOR": _MUTATEROWRESPONSE, + "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", + "__doc__": """Response message for Bigtable.MutateRow.""", + # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowResponse) + }, +) +_sym_db.RegisterMessage(MutateRowResponse) + +MutateRowsRequest = _reflection.GeneratedProtocolMessageType( + "MutateRowsRequest", + (_message.Message,), + { + "Entry": _reflection.GeneratedProtocolMessageType( + "Entry", + (_message.Message,), + { + "DESCRIPTOR": _MUTATEROWSREQUEST_ENTRY, + "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", + "__doc__": """A mutation for a given row. + + Attributes: + row_key: + The key of the row to which the ``mutations`` should be + applied. + mutations: + Required. Changes to be atomically applied to the specified + row. Mutations are applied in order, meaning that earlier + mutations can be masked by later ones. You must specify at + least one mutation. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsRequest.Entry) + }, + ), + "DESCRIPTOR": _MUTATEROWSREQUEST, + "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", + "__doc__": """Request message for BigtableService.MutateRows. + + Attributes: + table_name: + Required. 
The unique name of the table to which the mutations + should be applied. + app_profile_id: + This value specifies routing for replication. If not + specified, the “default” application profile will be used. + entries: + Required. The row keys and corresponding mutations to be + applied in bulk. Each entry is applied as an atomic mutation, + but the entries may be applied in arbitrary order (even + between entries for the same row). At least one entry must be + specified, and in total the entries can contain at most 100000 + mutations. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsRequest) + }, +) +_sym_db.RegisterMessage(MutateRowsRequest) +_sym_db.RegisterMessage(MutateRowsRequest.Entry) + +MutateRowsResponse = _reflection.GeneratedProtocolMessageType( + "MutateRowsResponse", + (_message.Message,), + { + "Entry": _reflection.GeneratedProtocolMessageType( + "Entry", + (_message.Message,), + { + "DESCRIPTOR": _MUTATEROWSRESPONSE_ENTRY, + "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", + "__doc__": """The result of applying a passed mutation in the original request. + + Attributes: + index: + The index into the original request’s ``entries`` list of the + Entry for which a result is being reported. + status: + The result of the request Entry identified by ``index``. + Depending on how requests are batched during execution, it is + possible for one Entry to fail due to an error with another + Entry. In the event that this occurs, the same error will be + reported for both entries. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsResponse.Entry) + }, + ), + "DESCRIPTOR": _MUTATEROWSRESPONSE, + "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", + "__doc__": """Response message for BigtableService.MutateRows. + + Attributes: + entries: + One or more results for Entries from the batch request. 
+ """, + # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsResponse) + }, +) +_sym_db.RegisterMessage(MutateRowsResponse) +_sym_db.RegisterMessage(MutateRowsResponse.Entry) + +CheckAndMutateRowRequest = _reflection.GeneratedProtocolMessageType( + "CheckAndMutateRowRequest", + (_message.Message,), + { + "DESCRIPTOR": _CHECKANDMUTATEROWREQUEST, + "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", + "__doc__": """Request message for Bigtable.CheckAndMutateRow. + + Attributes: + table_name: + Required. The unique name of the table to which the + conditional mutation should be applied. Values are of the form + ``projects//instances//tables/
``. + app_profile_id: + This value specifies routing for replication. If not + specified, the “default” application profile will be used. + row_key: + Required. The key of the row to which the conditional mutation + should be applied. + predicate_filter: + The filter to be applied to the contents of the specified row. + Depending on whether or not any results are yielded, either + ``true_mutations`` or ``false_mutations`` will be executed. If + unset, checks that the row contains any values at all. + true_mutations: + Changes to be atomically applied to the specified row if + ``predicate_filter`` yields at least one cell when applied to + ``row_key``. Entries are applied in order, meaning that + earlier mutations can be masked by later ones. Must contain at + least one entry if ``false_mutations`` is empty, and at most + 100000. + false_mutations: + Changes to be atomically applied to the specified row if + ``predicate_filter`` does not yield any cells when applied to + ``row_key``. Entries are applied in order, meaning that + earlier mutations can be masked by later ones. Must contain at + least one entry if ``true_mutations`` is empty, and at most + 100000. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.v2.CheckAndMutateRowRequest) + }, +) +_sym_db.RegisterMessage(CheckAndMutateRowRequest) + +CheckAndMutateRowResponse = _reflection.GeneratedProtocolMessageType( + "CheckAndMutateRowResponse", + (_message.Message,), + { + "DESCRIPTOR": _CHECKANDMUTATEROWRESPONSE, + "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", + "__doc__": """Response message for Bigtable.CheckAndMutateRow. + + Attributes: + predicate_matched: + Whether or not the request’s ``predicate_filter`` yielded any + results for the specified row. 
+ """, + # @@protoc_insertion_point(class_scope:google.bigtable.v2.CheckAndMutateRowResponse) + }, +) +_sym_db.RegisterMessage(CheckAndMutateRowResponse) + +ReadModifyWriteRowRequest = _reflection.GeneratedProtocolMessageType( + "ReadModifyWriteRowRequest", + (_message.Message,), + { + "DESCRIPTOR": _READMODIFYWRITEROWREQUEST, + "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", + "__doc__": """Request message for Bigtable.ReadModifyWriteRow. + + Attributes: + table_name: + Required. The unique name of the table to which the + read/modify/write rules should be applied. Values are of the + form + ``projects//instances//tables/
``. + app_profile_id: + This value specifies routing for replication. If not + specified, the “default” application profile will be used. + row_key: + Required. The key of the row to which the read/modify/write + rules should be applied. + rules: + Required. Rules specifying how the specified row’s contents + are to be transformed into writes. Entries are applied in + order, meaning that earlier rules will affect the results of + later ones. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadModifyWriteRowRequest) + }, +) +_sym_db.RegisterMessage(ReadModifyWriteRowRequest) + +ReadModifyWriteRowResponse = _reflection.GeneratedProtocolMessageType( + "ReadModifyWriteRowResponse", + (_message.Message,), + { + "DESCRIPTOR": _READMODIFYWRITEROWRESPONSE, + "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", + "__doc__": """Response message for Bigtable.ReadModifyWriteRow. + + Attributes: + row: + A Row containing the new contents of all cells modified by the + request. 
+ """, + # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadModifyWriteRowResponse) + }, +) +_sym_db.RegisterMessage(ReadModifyWriteRowResponse) + + +DESCRIPTOR._options = None +_READROWSREQUEST.fields_by_name["table_name"]._options = None +_SAMPLEROWKEYSREQUEST.fields_by_name["table_name"]._options = None +_MUTATEROWREQUEST.fields_by_name["table_name"]._options = None +_MUTATEROWREQUEST.fields_by_name["row_key"]._options = None +_MUTATEROWREQUEST.fields_by_name["mutations"]._options = None +_MUTATEROWSREQUEST_ENTRY.fields_by_name["mutations"]._options = None +_MUTATEROWSREQUEST.fields_by_name["table_name"]._options = None +_MUTATEROWSREQUEST.fields_by_name["entries"]._options = None +_CHECKANDMUTATEROWREQUEST.fields_by_name["table_name"]._options = None +_CHECKANDMUTATEROWREQUEST.fields_by_name["row_key"]._options = None +_READMODIFYWRITEROWREQUEST.fields_by_name["table_name"]._options = None +_READMODIFYWRITEROWREQUEST.fields_by_name["row_key"]._options = None +_READMODIFYWRITEROWREQUEST.fields_by_name["rules"]._options = None + +_BIGTABLE = _descriptor.ServiceDescriptor( + name="Bigtable", + full_name="google.bigtable.v2.Bigtable", + file=DESCRIPTOR, + index=0, + serialized_options=b"\312A\027bigtable.googleapis.com\322A\275\002https://www.googleapis.com/auth/bigtable.data,https://www.googleapis.com/auth/bigtable.data.readonly,https://www.googleapis.com/auth/cloud-bigtable.data,https://www.googleapis.com/auth/cloud-bigtable.data.readonly,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-only", + create_key=_descriptor._internal_create_key, + serialized_start=2266, + serialized_end=4126, + methods=[ + _descriptor.MethodDescriptor( + name="ReadRows", + full_name="google.bigtable.v2.Bigtable.ReadRows", + index=0, + containing_service=None, + input_type=_READROWSREQUEST, + output_type=_READROWSRESPONSE, + 
serialized_options=b'\202\323\344\223\002>"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\001*\332A\ntable_name\332A\031table_name,app_profile_id', + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="SampleRowKeys", + full_name="google.bigtable.v2.Bigtable.SampleRowKeys", + index=1, + containing_service=None, + input_type=_SAMPLEROWKEYSREQUEST, + output_type=_SAMPLEROWKEYSRESPONSE, + serialized_options=b"\202\323\344\223\002@\022>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys\332A\ntable_name\332A\031table_name,app_profile_id", + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="MutateRow", + full_name="google.bigtable.v2.Bigtable.MutateRow", + index=2, + containing_service=None, + input_type=_MUTATEROWREQUEST, + output_type=_MUTATEROWRESPONSE, + serialized_options=b'\202\323\344\223\002?":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\001*\332A\034table_name,row_key,mutations\332A+table_name,row_key,mutations,app_profile_id', + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="MutateRows", + full_name="google.bigtable.v2.Bigtable.MutateRows", + index=3, + containing_service=None, + input_type=_MUTATEROWSREQUEST, + output_type=_MUTATEROWSRESPONSE, + serialized_options=b'\202\323\344\223\002@";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\001*\332A\022table_name,entries\332A!table_name,entries,app_profile_id', + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="CheckAndMutateRow", + full_name="google.bigtable.v2.Bigtable.CheckAndMutateRow", + index=4, + containing_service=None, + input_type=_CHECKANDMUTATEROWREQUEST, + output_type=_CHECKANDMUTATEROWRESPONSE, + 
serialized_options=b'\202\323\344\223\002G"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\001*\332ABtable_name,row_key,predicate_filter,true_mutations,false_mutations\332AQtable_name,row_key,predicate_filter,true_mutations,false_mutations,app_profile_id', + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="ReadModifyWriteRow", + full_name="google.bigtable.v2.Bigtable.ReadModifyWriteRow", + index=5, + containing_service=None, + input_type=_READMODIFYWRITEROWREQUEST, + output_type=_READMODIFYWRITEROWRESPONSE, + serialized_options=b"\202\323\344\223\002H\"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\001*\332A\030table_name,row_key,rules\332A'table_name,row_key,rules,app_profile_id", + create_key=_descriptor._internal_create_key, + ), + ], +) +_sym_db.RegisterServiceDescriptor(_BIGTABLE) + +DESCRIPTOR.services_by_name["Bigtable"] = _BIGTABLE + +# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py b/google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py new file mode 100644 index 000000000..db4ee99f3 --- /dev/null +++ b/google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py @@ -0,0 +1,317 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + +from google.cloud.bigtable_v2.proto import ( + bigtable_pb2 as google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2, +) + + +class BigtableStub(object): + """Service for reading from and writing to existing Bigtable tables. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.ReadRows = channel.unary_stream( + "/google.bigtable.v2.Bigtable/ReadRows", + request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsResponse.FromString, + ) + self.SampleRowKeys = channel.unary_stream( + "/google.bigtable.v2.Bigtable/SampleRowKeys", + request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.SampleRowKeysRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.SampleRowKeysResponse.FromString, + ) + self.MutateRow = channel.unary_unary( + "/google.bigtable.v2.Bigtable/MutateRow", + request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowResponse.FromString, + ) + self.MutateRows = channel.unary_stream( + "/google.bigtable.v2.Bigtable/MutateRows", + request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowsRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowsResponse.FromString, + ) + self.CheckAndMutateRow = channel.unary_unary( + "/google.bigtable.v2.Bigtable/CheckAndMutateRow", + request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.CheckAndMutateRowRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.CheckAndMutateRowResponse.FromString, + ) + self.ReadModifyWriteRow = channel.unary_unary( + "/google.bigtable.v2.Bigtable/ReadModifyWriteRow", + request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadModifyWriteRowRequest.SerializeToString, + 
response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadModifyWriteRowResponse.FromString, + ) + + +class BigtableServicer(object): + """Service for reading from and writing to existing Bigtable tables. + """ + + def ReadRows(self, request, context): + """Streams back the contents of all requested rows in key order, optionally + applying the same Reader filter to each. Depending on their size, + rows and cells may be broken up across multiple responses, but + atomicity of each row will still be preserved. See the + ReadRowsResponse documentation for details. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def SampleRowKeys(self, request, context): + """Returns a sample of row keys in the table. The returned row keys will + delimit contiguous sections of the table of approximately equal size, + which can be used to break up the data for distributed tasks like + mapreduces. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def MutateRow(self, request, context): + """Mutates a row atomically. Cells already present in the row are left + unchanged unless explicitly changed by `mutation`. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def MutateRows(self, request, context): + """Mutates multiple rows in a batch. Each individual row is mutated + atomically as in MutateRow, but the entire batch is not executed + atomically. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def CheckAndMutateRow(self, request, context): + """Mutates a row atomically based on the output of a predicate Reader filter. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def ReadModifyWriteRow(self, request, context): + """Modifies a row atomically on the server. The method reads the latest + existing timestamp and value from the specified columns and writes a new + entry based on pre-defined read/modify/write rules. The new value for the + timestamp is the greater of the existing timestamp or the current server + time. The method returns the new contents of all modified cells. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + +def add_BigtableServicer_to_server(servicer, server): + rpc_method_handlers = { + "ReadRows": grpc.unary_stream_rpc_method_handler( + servicer.ReadRows, + request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsResponse.SerializeToString, + ), + "SampleRowKeys": grpc.unary_stream_rpc_method_handler( + servicer.SampleRowKeys, + request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.SampleRowKeysRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.SampleRowKeysResponse.SerializeToString, + ), + "MutateRow": grpc.unary_unary_rpc_method_handler( + servicer.MutateRow, + request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowRequest.FromString, + 
response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowResponse.SerializeToString, + ), + "MutateRows": grpc.unary_stream_rpc_method_handler( + servicer.MutateRows, + request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowsRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowsResponse.SerializeToString, + ), + "CheckAndMutateRow": grpc.unary_unary_rpc_method_handler( + servicer.CheckAndMutateRow, + request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.CheckAndMutateRowRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.CheckAndMutateRowResponse.SerializeToString, + ), + "ReadModifyWriteRow": grpc.unary_unary_rpc_method_handler( + servicer.ReadModifyWriteRow, + request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadModifyWriteRowRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadModifyWriteRowResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + "google.bigtable.v2.Bigtable", rpc_method_handlers + ) + server.add_generic_rpc_handlers((generic_handler,)) + + +# This class is part of an EXPERIMENTAL API. +class Bigtable(object): + """Service for reading from and writing to existing Bigtable tables. 
+ """ + + @staticmethod + def ReadRows( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_stream( + request, + target, + "/google.bigtable.v2.Bigtable/ReadRows", + google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsRequest.SerializeToString, + google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsResponse.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def SampleRowKeys( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_stream( + request, + target, + "/google.bigtable.v2.Bigtable/SampleRowKeys", + google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.SampleRowKeysRequest.SerializeToString, + google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.SampleRowKeysResponse.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def MutateRow( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.v2.Bigtable/MutateRow", + google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowRequest.SerializeToString, + google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowResponse.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def MutateRows( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + 
compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_stream( + request, + target, + "/google.bigtable.v2.Bigtable/MutateRows", + google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowsRequest.SerializeToString, + google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowsResponse.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def CheckAndMutateRow( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.v2.Bigtable/CheckAndMutateRow", + google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.CheckAndMutateRowRequest.SerializeToString, + google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.CheckAndMutateRowResponse.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def ReadModifyWriteRow( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.v2.Bigtable/ReadModifyWriteRow", + google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadModifyWriteRowRequest.SerializeToString, + google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadModifyWriteRowResponse.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) diff --git a/google/cloud/bigtable_v2/proto/data_pb2.py b/google/cloud/bigtable_v2/proto/data_pb2.py new file mode 100644 index 000000000..a64f9b10e --- /dev/null +++ 
b/google/cloud/bigtable_v2/proto/data_pb2.py @@ -0,0 +1,2668 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/cloud/bigtable_v2/proto/data.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database + +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +DESCRIPTOR = _descriptor.FileDescriptor( + name="google/cloud/bigtable_v2/proto/data.proto", + package="google.bigtable.v2", + syntax="proto3", + serialized_options=b"\n\026com.google.bigtable.v2B\tDataProtoP\001Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2\352\002\033Google::Cloud::Bigtable::V2", + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n)google/cloud/bigtable_v2/proto/data.proto\x12\x12google.bigtable.v2"@\n\x03Row\x12\x0b\n\x03key\x18\x01 \x01(\x0c\x12,\n\x08\x66\x61milies\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Family"C\n\x06\x46\x61mily\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x07\x63olumns\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Column"D\n\x06\x43olumn\x12\x11\n\tqualifier\x18\x01 \x01(\x0c\x12\'\n\x05\x63\x65lls\x18\x02 \x03(\x0b\x32\x18.google.bigtable.v2.Cell"?\n\x04\x43\x65ll\x12\x18\n\x10timestamp_micros\x18\x01 \x01(\x03\x12\r\n\x05value\x18\x02 \x01(\x0c\x12\x0e\n\x06labels\x18\x03 \x03(\t"\x8a\x01\n\x08RowRange\x12\x1a\n\x10start_key_closed\x18\x01 \x01(\x0cH\x00\x12\x18\n\x0estart_key_open\x18\x02 \x01(\x0cH\x00\x12\x16\n\x0c\x65nd_key_open\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_key_closed\x18\x04 \x01(\x0cH\x01\x42\x0b\n\tstart_keyB\t\n\x07\x65nd_key"L\n\x06RowSet\x12\x10\n\x08row_keys\x18\x01 \x03(\x0c\x12\x30\n\nrow_ranges\x18\x02 
\x03(\x0b\x32\x1c.google.bigtable.v2.RowRange"\xc6\x01\n\x0b\x43olumnRange\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12 \n\x16start_qualifier_closed\x18\x02 \x01(\x0cH\x00\x12\x1e\n\x14start_qualifier_open\x18\x03 \x01(\x0cH\x00\x12\x1e\n\x14\x65nd_qualifier_closed\x18\x04 \x01(\x0cH\x01\x12\x1c\n\x12\x65nd_qualifier_open\x18\x05 \x01(\x0cH\x01\x42\x11\n\x0fstart_qualifierB\x0f\n\rend_qualifier"N\n\x0eTimestampRange\x12\x1e\n\x16start_timestamp_micros\x18\x01 \x01(\x03\x12\x1c\n\x14\x65nd_timestamp_micros\x18\x02 \x01(\x03"\x98\x01\n\nValueRange\x12\x1c\n\x12start_value_closed\x18\x01 \x01(\x0cH\x00\x12\x1a\n\x10start_value_open\x18\x02 \x01(\x0cH\x00\x12\x1a\n\x10\x65nd_value_closed\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_value_open\x18\x04 \x01(\x0cH\x01\x42\r\n\x0bstart_valueB\x0b\n\tend_value"\xdf\x08\n\tRowFilter\x12\x34\n\x05\x63hain\x18\x01 \x01(\x0b\x32#.google.bigtable.v2.RowFilter.ChainH\x00\x12>\n\ninterleave\x18\x02 \x01(\x0b\x32(.google.bigtable.v2.RowFilter.InterleaveH\x00\x12<\n\tcondition\x18\x03 \x01(\x0b\x32\'.google.bigtable.v2.RowFilter.ConditionH\x00\x12\x0e\n\x04sink\x18\x10 \x01(\x08H\x00\x12\x19\n\x0fpass_all_filter\x18\x11 \x01(\x08H\x00\x12\x1a\n\x10\x62lock_all_filter\x18\x12 \x01(\x08H\x00\x12\x1e\n\x14row_key_regex_filter\x18\x04 \x01(\x0cH\x00\x12\x1b\n\x11row_sample_filter\x18\x0e \x01(\x01H\x00\x12"\n\x18\x66\x61mily_name_regex_filter\x18\x05 \x01(\tH\x00\x12\'\n\x1d\x63olumn_qualifier_regex_filter\x18\x06 \x01(\x0cH\x00\x12>\n\x13\x63olumn_range_filter\x18\x07 \x01(\x0b\x32\x1f.google.bigtable.v2.ColumnRangeH\x00\x12\x44\n\x16timestamp_range_filter\x18\x08 \x01(\x0b\x32".google.bigtable.v2.TimestampRangeH\x00\x12\x1c\n\x12value_regex_filter\x18\t \x01(\x0cH\x00\x12<\n\x12value_range_filter\x18\x0f \x01(\x0b\x32\x1e.google.bigtable.v2.ValueRangeH\x00\x12%\n\x1b\x63\x65lls_per_row_offset_filter\x18\n \x01(\x05H\x00\x12$\n\x1a\x63\x65lls_per_row_limit_filter\x18\x0b 
\x01(\x05H\x00\x12\'\n\x1d\x63\x65lls_per_column_limit_filter\x18\x0c \x01(\x05H\x00\x12!\n\x17strip_value_transformer\x18\r \x01(\x08H\x00\x12!\n\x17\x61pply_label_transformer\x18\x13 \x01(\tH\x00\x1a\x37\n\x05\x43hain\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a<\n\nInterleave\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a\xad\x01\n\tCondition\x12\x37\n\x10predicate_filter\x18\x01 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x32\n\x0btrue_filter\x18\x02 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x33\n\x0c\x66\x61lse_filter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilterB\x08\n\x06\x66ilter"\xc9\x04\n\x08Mutation\x12\x38\n\x08set_cell\x18\x01 \x01(\x0b\x32$.google.bigtable.v2.Mutation.SetCellH\x00\x12K\n\x12\x64\x65lete_from_column\x18\x02 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromColumnH\x00\x12K\n\x12\x64\x65lete_from_family\x18\x03 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromFamilyH\x00\x12\x45\n\x0f\x64\x65lete_from_row\x18\x04 \x01(\x0b\x32*.google.bigtable.v2.Mutation.DeleteFromRowH\x00\x1a\x61\n\x07SetCell\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x18\n\x10timestamp_micros\x18\x03 \x01(\x03\x12\r\n\x05value\x18\x04 \x01(\x0c\x1ay\n\x10\x44\x65leteFromColumn\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x36\n\ntime_range\x18\x03 \x01(\x0b\x32".google.bigtable.v2.TimestampRange\x1a\'\n\x10\x44\x65leteFromFamily\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x1a\x0f\n\rDeleteFromRowB\n\n\x08mutation"\x80\x01\n\x13ReadModifyWriteRule\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x16\n\x0c\x61ppend_value\x18\x03 \x01(\x0cH\x00\x12\x1a\n\x10increment_amount\x18\x04 
\x01(\x03H\x00\x42\x06\n\x04ruleB\xb5\x01\n\x16\x63om.google.bigtable.v2B\tDataProtoP\x01Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2\xea\x02\x1bGoogle::Cloud::Bigtable::V2b\x06proto3', +) + + +_ROW = _descriptor.Descriptor( + name="Row", + full_name="google.bigtable.v2.Row", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="key", + full_name="google.bigtable.v2.Row.key", + index=0, + number=1, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"", + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="families", + full_name="google.bigtable.v2.Row.families", + index=1, + number=2, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=65, + serialized_end=129, +) + + +_FAMILY = _descriptor.Descriptor( + name="Family", + full_name="google.bigtable.v2.Family", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.v2.Family.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + 
containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="columns", + full_name="google.bigtable.v2.Family.columns", + index=1, + number=2, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=131, + serialized_end=198, +) + + +_COLUMN = _descriptor.Descriptor( + name="Column", + full_name="google.bigtable.v2.Column", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="qualifier", + full_name="google.bigtable.v2.Column.qualifier", + index=0, + number=1, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"", + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="cells", + full_name="google.bigtable.v2.Column.cells", + index=1, + number=2, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + 
serialized_start=200, + serialized_end=268, +) + + +_CELL = _descriptor.Descriptor( + name="Cell", + full_name="google.bigtable.v2.Cell", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="timestamp_micros", + full_name="google.bigtable.v2.Cell.timestamp_micros", + index=0, + number=1, + type=3, + cpp_type=2, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="value", + full_name="google.bigtable.v2.Cell.value", + index=1, + number=2, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"", + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="labels", + full_name="google.bigtable.v2.Cell.labels", + index=2, + number=3, + type=9, + cpp_type=9, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=270, + serialized_end=333, +) + + +_ROWRANGE = _descriptor.Descriptor( + name="RowRange", + full_name="google.bigtable.v2.RowRange", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="start_key_closed", + 
full_name="google.bigtable.v2.RowRange.start_key_closed", + index=0, + number=1, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"", + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="start_key_open", + full_name="google.bigtable.v2.RowRange.start_key_open", + index=1, + number=2, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"", + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="end_key_open", + full_name="google.bigtable.v2.RowRange.end_key_open", + index=2, + number=3, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"", + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="end_key_closed", + full_name="google.bigtable.v2.RowRange.end_key_closed", + index=3, + number=4, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"", + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name="start_key", + full_name="google.bigtable.v2.RowRange.start_key", + index=0, + containing_type=None, + create_key=_descriptor._internal_create_key, 
+ fields=[], + ), + _descriptor.OneofDescriptor( + name="end_key", + full_name="google.bigtable.v2.RowRange.end_key", + index=1, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[], + ), + ], + serialized_start=336, + serialized_end=474, +) + + +_ROWSET = _descriptor.Descriptor( + name="RowSet", + full_name="google.bigtable.v2.RowSet", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="row_keys", + full_name="google.bigtable.v2.RowSet.row_keys", + index=0, + number=1, + type=12, + cpp_type=9, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="row_ranges", + full_name="google.bigtable.v2.RowSet.row_ranges", + index=1, + number=2, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=476, + serialized_end=552, +) + + +_COLUMNRANGE = _descriptor.Descriptor( + name="ColumnRange", + full_name="google.bigtable.v2.ColumnRange", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="family_name", + full_name="google.bigtable.v2.ColumnRange.family_name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + 
message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="start_qualifier_closed", + full_name="google.bigtable.v2.ColumnRange.start_qualifier_closed", + index=1, + number=2, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"", + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="start_qualifier_open", + full_name="google.bigtable.v2.ColumnRange.start_qualifier_open", + index=2, + number=3, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"", + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="end_qualifier_closed", + full_name="google.bigtable.v2.ColumnRange.end_qualifier_closed", + index=3, + number=4, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"", + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="end_qualifier_open", + full_name="google.bigtable.v2.ColumnRange.end_qualifier_open", + index=4, + number=5, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"", + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], 
+ nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name="start_qualifier", + full_name="google.bigtable.v2.ColumnRange.start_qualifier", + index=0, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[], + ), + _descriptor.OneofDescriptor( + name="end_qualifier", + full_name="google.bigtable.v2.ColumnRange.end_qualifier", + index=1, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[], + ), + ], + serialized_start=555, + serialized_end=753, +) + + +_TIMESTAMPRANGE = _descriptor.Descriptor( + name="TimestampRange", + full_name="google.bigtable.v2.TimestampRange", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="start_timestamp_micros", + full_name="google.bigtable.v2.TimestampRange.start_timestamp_micros", + index=0, + number=1, + type=3, + cpp_type=2, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="end_timestamp_micros", + full_name="google.bigtable.v2.TimestampRange.end_timestamp_micros", + index=1, + number=2, + type=3, + cpp_type=2, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=755, + serialized_end=833, +) + + +_VALUERANGE = 
_descriptor.Descriptor( + name="ValueRange", + full_name="google.bigtable.v2.ValueRange", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="start_value_closed", + full_name="google.bigtable.v2.ValueRange.start_value_closed", + index=0, + number=1, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"", + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="start_value_open", + full_name="google.bigtable.v2.ValueRange.start_value_open", + index=1, + number=2, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"", + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="end_value_closed", + full_name="google.bigtable.v2.ValueRange.end_value_closed", + index=2, + number=3, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"", + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="end_value_open", + full_name="google.bigtable.v2.ValueRange.end_value_open", + index=3, + number=4, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"", + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + 
enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name="start_value", + full_name="google.bigtable.v2.ValueRange.start_value", + index=0, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[], + ), + _descriptor.OneofDescriptor( + name="end_value", + full_name="google.bigtable.v2.ValueRange.end_value", + index=1, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[], + ), + ], + serialized_start=836, + serialized_end=988, +) + + +_ROWFILTER_CHAIN = _descriptor.Descriptor( + name="Chain", + full_name="google.bigtable.v2.RowFilter.Chain", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="filters", + full_name="google.bigtable.v2.RowFilter.Chain.filters", + index=0, + number=1, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1807, + serialized_end=1862, +) + +_ROWFILTER_INTERLEAVE = _descriptor.Descriptor( + name="Interleave", + full_name="google.bigtable.v2.RowFilter.Interleave", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="filters", + full_name="google.bigtable.v2.RowFilter.Interleave.filters", + index=0, + number=1, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + 
is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1864, + serialized_end=1924, +) + +_ROWFILTER_CONDITION = _descriptor.Descriptor( + name="Condition", + full_name="google.bigtable.v2.RowFilter.Condition", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="predicate_filter", + full_name="google.bigtable.v2.RowFilter.Condition.predicate_filter", + index=0, + number=1, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="true_filter", + full_name="google.bigtable.v2.RowFilter.Condition.true_filter", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="false_filter", + full_name="google.bigtable.v2.RowFilter.Condition.false_filter", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, 
+ is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1927, + serialized_end=2100, +) + +_ROWFILTER = _descriptor.Descriptor( + name="RowFilter", + full_name="google.bigtable.v2.RowFilter", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="chain", + full_name="google.bigtable.v2.RowFilter.chain", + index=0, + number=1, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="interleave", + full_name="google.bigtable.v2.RowFilter.interleave", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="condition", + full_name="google.bigtable.v2.RowFilter.condition", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="sink", + full_name="google.bigtable.v2.RowFilter.sink", + index=3, + number=16, + type=8, + cpp_type=7, + label=1, + has_default_value=False, + default_value=False, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + 
create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="pass_all_filter", + full_name="google.bigtable.v2.RowFilter.pass_all_filter", + index=4, + number=17, + type=8, + cpp_type=7, + label=1, + has_default_value=False, + default_value=False, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="block_all_filter", + full_name="google.bigtable.v2.RowFilter.block_all_filter", + index=5, + number=18, + type=8, + cpp_type=7, + label=1, + has_default_value=False, + default_value=False, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="row_key_regex_filter", + full_name="google.bigtable.v2.RowFilter.row_key_regex_filter", + index=6, + number=4, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"", + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="row_sample_filter", + full_name="google.bigtable.v2.RowFilter.row_sample_filter", + index=7, + number=14, + type=1, + cpp_type=5, + label=1, + has_default_value=False, + default_value=float(0), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="family_name_regex_filter", + full_name="google.bigtable.v2.RowFilter.family_name_regex_filter", + index=8, + number=5, + type=9, + cpp_type=9, + label=1, + 
has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="column_qualifier_regex_filter", + full_name="google.bigtable.v2.RowFilter.column_qualifier_regex_filter", + index=9, + number=6, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"", + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="column_range_filter", + full_name="google.bigtable.v2.RowFilter.column_range_filter", + index=10, + number=7, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="timestamp_range_filter", + full_name="google.bigtable.v2.RowFilter.timestamp_range_filter", + index=11, + number=8, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="value_regex_filter", + full_name="google.bigtable.v2.RowFilter.value_regex_filter", + index=12, + number=9, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"", + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + 
file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="value_range_filter", + full_name="google.bigtable.v2.RowFilter.value_range_filter", + index=13, + number=15, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="cells_per_row_offset_filter", + full_name="google.bigtable.v2.RowFilter.cells_per_row_offset_filter", + index=14, + number=10, + type=5, + cpp_type=1, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="cells_per_row_limit_filter", + full_name="google.bigtable.v2.RowFilter.cells_per_row_limit_filter", + index=15, + number=11, + type=5, + cpp_type=1, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="cells_per_column_limit_filter", + full_name="google.bigtable.v2.RowFilter.cells_per_column_limit_filter", + index=16, + number=12, + type=5, + cpp_type=1, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="strip_value_transformer", + 
full_name="google.bigtable.v2.RowFilter.strip_value_transformer", + index=17, + number=13, + type=8, + cpp_type=7, + label=1, + has_default_value=False, + default_value=False, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="apply_label_transformer", + full_name="google.bigtable.v2.RowFilter.apply_label_transformer", + index=18, + number=19, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[_ROWFILTER_CHAIN, _ROWFILTER_INTERLEAVE, _ROWFILTER_CONDITION,], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name="filter", + full_name="google.bigtable.v2.RowFilter.filter", + index=0, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[], + ), + ], + serialized_start=991, + serialized_end=2110, +) + + +_MUTATION_SETCELL = _descriptor.Descriptor( + name="SetCell", + full_name="google.bigtable.v2.Mutation.SetCell", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="family_name", + full_name="google.bigtable.v2.Mutation.SetCell.family_name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, 
+ ), + _descriptor.FieldDescriptor( + name="column_qualifier", + full_name="google.bigtable.v2.Mutation.SetCell.column_qualifier", + index=1, + number=2, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"", + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="timestamp_micros", + full_name="google.bigtable.v2.Mutation.SetCell.timestamp_micros", + index=2, + number=3, + type=3, + cpp_type=2, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="value", + full_name="google.bigtable.v2.Mutation.SetCell.value", + index=3, + number=4, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"", + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=2408, + serialized_end=2505, +) + +_MUTATION_DELETEFROMCOLUMN = _descriptor.Descriptor( + name="DeleteFromColumn", + full_name="google.bigtable.v2.Mutation.DeleteFromColumn", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="family_name", + full_name="google.bigtable.v2.Mutation.DeleteFromColumn.family_name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + 
default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="column_qualifier", + full_name="google.bigtable.v2.Mutation.DeleteFromColumn.column_qualifier", + index=1, + number=2, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"", + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="time_range", + full_name="google.bigtable.v2.Mutation.DeleteFromColumn.time_range", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=2507, + serialized_end=2628, +) + +_MUTATION_DELETEFROMFAMILY = _descriptor.Descriptor( + name="DeleteFromFamily", + full_name="google.bigtable.v2.Mutation.DeleteFromFamily", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="family_name", + full_name="google.bigtable.v2.Mutation.DeleteFromFamily.family_name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + 
file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=2630, + serialized_end=2669, +) + +_MUTATION_DELETEFROMROW = _descriptor.Descriptor( + name="DeleteFromRow", + full_name="google.bigtable.v2.Mutation.DeleteFromRow", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=2671, + serialized_end=2686, +) + +_MUTATION = _descriptor.Descriptor( + name="Mutation", + full_name="google.bigtable.v2.Mutation", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="set_cell", + full_name="google.bigtable.v2.Mutation.set_cell", + index=0, + number=1, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="delete_from_column", + full_name="google.bigtable.v2.Mutation.delete_from_column", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="delete_from_family", + full_name="google.bigtable.v2.Mutation.delete_from_family", + index=2, + number=3, + type=11, + cpp_type=10, + 
label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="delete_from_row", + full_name="google.bigtable.v2.Mutation.delete_from_row", + index=3, + number=4, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[ + _MUTATION_SETCELL, + _MUTATION_DELETEFROMCOLUMN, + _MUTATION_DELETEFROMFAMILY, + _MUTATION_DELETEFROMROW, + ], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name="mutation", + full_name="google.bigtable.v2.Mutation.mutation", + index=0, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[], + ), + ], + serialized_start=2113, + serialized_end=2698, +) + + +_READMODIFYWRITERULE = _descriptor.Descriptor( + name="ReadModifyWriteRule", + full_name="google.bigtable.v2.ReadModifyWriteRule", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="family_name", + full_name="google.bigtable.v2.ReadModifyWriteRule.family_name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="column_qualifier", + 
full_name="google.bigtable.v2.ReadModifyWriteRule.column_qualifier", + index=1, + number=2, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"", + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="append_value", + full_name="google.bigtable.v2.ReadModifyWriteRule.append_value", + index=2, + number=3, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"", + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="increment_amount", + full_name="google.bigtable.v2.ReadModifyWriteRule.increment_amount", + index=3, + number=4, + type=3, + cpp_type=2, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name="rule", + full_name="google.bigtable.v2.ReadModifyWriteRule.rule", + index=0, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[], + ), + ], + serialized_start=2701, + serialized_end=2829, +) + +_ROW.fields_by_name["families"].message_type = _FAMILY +_FAMILY.fields_by_name["columns"].message_type = _COLUMN +_COLUMN.fields_by_name["cells"].message_type = _CELL +_ROWRANGE.oneofs_by_name["start_key"].fields.append( + _ROWRANGE.fields_by_name["start_key_closed"] +) +_ROWRANGE.fields_by_name[ + 
"start_key_closed" +].containing_oneof = _ROWRANGE.oneofs_by_name["start_key"] +_ROWRANGE.oneofs_by_name["start_key"].fields.append( + _ROWRANGE.fields_by_name["start_key_open"] +) +_ROWRANGE.fields_by_name["start_key_open"].containing_oneof = _ROWRANGE.oneofs_by_name[ + "start_key" +] +_ROWRANGE.oneofs_by_name["end_key"].fields.append( + _ROWRANGE.fields_by_name["end_key_open"] +) +_ROWRANGE.fields_by_name["end_key_open"].containing_oneof = _ROWRANGE.oneofs_by_name[ + "end_key" +] +_ROWRANGE.oneofs_by_name["end_key"].fields.append( + _ROWRANGE.fields_by_name["end_key_closed"] +) +_ROWRANGE.fields_by_name["end_key_closed"].containing_oneof = _ROWRANGE.oneofs_by_name[ + "end_key" +] +_ROWSET.fields_by_name["row_ranges"].message_type = _ROWRANGE +_COLUMNRANGE.oneofs_by_name["start_qualifier"].fields.append( + _COLUMNRANGE.fields_by_name["start_qualifier_closed"] +) +_COLUMNRANGE.fields_by_name[ + "start_qualifier_closed" +].containing_oneof = _COLUMNRANGE.oneofs_by_name["start_qualifier"] +_COLUMNRANGE.oneofs_by_name["start_qualifier"].fields.append( + _COLUMNRANGE.fields_by_name["start_qualifier_open"] +) +_COLUMNRANGE.fields_by_name[ + "start_qualifier_open" +].containing_oneof = _COLUMNRANGE.oneofs_by_name["start_qualifier"] +_COLUMNRANGE.oneofs_by_name["end_qualifier"].fields.append( + _COLUMNRANGE.fields_by_name["end_qualifier_closed"] +) +_COLUMNRANGE.fields_by_name[ + "end_qualifier_closed" +].containing_oneof = _COLUMNRANGE.oneofs_by_name["end_qualifier"] +_COLUMNRANGE.oneofs_by_name["end_qualifier"].fields.append( + _COLUMNRANGE.fields_by_name["end_qualifier_open"] +) +_COLUMNRANGE.fields_by_name[ + "end_qualifier_open" +].containing_oneof = _COLUMNRANGE.oneofs_by_name["end_qualifier"] +_VALUERANGE.oneofs_by_name["start_value"].fields.append( + _VALUERANGE.fields_by_name["start_value_closed"] +) +_VALUERANGE.fields_by_name[ + "start_value_closed" +].containing_oneof = _VALUERANGE.oneofs_by_name["start_value"] 
+_VALUERANGE.oneofs_by_name["start_value"].fields.append( + _VALUERANGE.fields_by_name["start_value_open"] +) +_VALUERANGE.fields_by_name[ + "start_value_open" +].containing_oneof = _VALUERANGE.oneofs_by_name["start_value"] +_VALUERANGE.oneofs_by_name["end_value"].fields.append( + _VALUERANGE.fields_by_name["end_value_closed"] +) +_VALUERANGE.fields_by_name[ + "end_value_closed" +].containing_oneof = _VALUERANGE.oneofs_by_name["end_value"] +_VALUERANGE.oneofs_by_name["end_value"].fields.append( + _VALUERANGE.fields_by_name["end_value_open"] +) +_VALUERANGE.fields_by_name[ + "end_value_open" +].containing_oneof = _VALUERANGE.oneofs_by_name["end_value"] +_ROWFILTER_CHAIN.fields_by_name["filters"].message_type = _ROWFILTER +_ROWFILTER_CHAIN.containing_type = _ROWFILTER +_ROWFILTER_INTERLEAVE.fields_by_name["filters"].message_type = _ROWFILTER +_ROWFILTER_INTERLEAVE.containing_type = _ROWFILTER +_ROWFILTER_CONDITION.fields_by_name["predicate_filter"].message_type = _ROWFILTER +_ROWFILTER_CONDITION.fields_by_name["true_filter"].message_type = _ROWFILTER +_ROWFILTER_CONDITION.fields_by_name["false_filter"].message_type = _ROWFILTER +_ROWFILTER_CONDITION.containing_type = _ROWFILTER +_ROWFILTER.fields_by_name["chain"].message_type = _ROWFILTER_CHAIN +_ROWFILTER.fields_by_name["interleave"].message_type = _ROWFILTER_INTERLEAVE +_ROWFILTER.fields_by_name["condition"].message_type = _ROWFILTER_CONDITION +_ROWFILTER.fields_by_name["column_range_filter"].message_type = _COLUMNRANGE +_ROWFILTER.fields_by_name["timestamp_range_filter"].message_type = _TIMESTAMPRANGE +_ROWFILTER.fields_by_name["value_range_filter"].message_type = _VALUERANGE +_ROWFILTER.oneofs_by_name["filter"].fields.append(_ROWFILTER.fields_by_name["chain"]) +_ROWFILTER.fields_by_name["chain"].containing_oneof = _ROWFILTER.oneofs_by_name[ + "filter" +] +_ROWFILTER.oneofs_by_name["filter"].fields.append( + _ROWFILTER.fields_by_name["interleave"] +) +_ROWFILTER.fields_by_name["interleave"].containing_oneof = 
_ROWFILTER.oneofs_by_name[ + "filter" +] +_ROWFILTER.oneofs_by_name["filter"].fields.append( + _ROWFILTER.fields_by_name["condition"] +) +_ROWFILTER.fields_by_name["condition"].containing_oneof = _ROWFILTER.oneofs_by_name[ + "filter" +] +_ROWFILTER.oneofs_by_name["filter"].fields.append(_ROWFILTER.fields_by_name["sink"]) +_ROWFILTER.fields_by_name["sink"].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] +_ROWFILTER.oneofs_by_name["filter"].fields.append( + _ROWFILTER.fields_by_name["pass_all_filter"] +) +_ROWFILTER.fields_by_name[ + "pass_all_filter" +].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] +_ROWFILTER.oneofs_by_name["filter"].fields.append( + _ROWFILTER.fields_by_name["block_all_filter"] +) +_ROWFILTER.fields_by_name[ + "block_all_filter" +].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] +_ROWFILTER.oneofs_by_name["filter"].fields.append( + _ROWFILTER.fields_by_name["row_key_regex_filter"] +) +_ROWFILTER.fields_by_name[ + "row_key_regex_filter" +].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] +_ROWFILTER.oneofs_by_name["filter"].fields.append( + _ROWFILTER.fields_by_name["row_sample_filter"] +) +_ROWFILTER.fields_by_name[ + "row_sample_filter" +].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] +_ROWFILTER.oneofs_by_name["filter"].fields.append( + _ROWFILTER.fields_by_name["family_name_regex_filter"] +) +_ROWFILTER.fields_by_name[ + "family_name_regex_filter" +].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] +_ROWFILTER.oneofs_by_name["filter"].fields.append( + _ROWFILTER.fields_by_name["column_qualifier_regex_filter"] +) +_ROWFILTER.fields_by_name[ + "column_qualifier_regex_filter" +].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] +_ROWFILTER.oneofs_by_name["filter"].fields.append( + _ROWFILTER.fields_by_name["column_range_filter"] +) +_ROWFILTER.fields_by_name[ + "column_range_filter" +].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] +_ROWFILTER.oneofs_by_name["filter"].fields.append( + 
_ROWFILTER.fields_by_name["timestamp_range_filter"] +) +_ROWFILTER.fields_by_name[ + "timestamp_range_filter" +].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] +_ROWFILTER.oneofs_by_name["filter"].fields.append( + _ROWFILTER.fields_by_name["value_regex_filter"] +) +_ROWFILTER.fields_by_name[ + "value_regex_filter" +].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] +_ROWFILTER.oneofs_by_name["filter"].fields.append( + _ROWFILTER.fields_by_name["value_range_filter"] +) +_ROWFILTER.fields_by_name[ + "value_range_filter" +].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] +_ROWFILTER.oneofs_by_name["filter"].fields.append( + _ROWFILTER.fields_by_name["cells_per_row_offset_filter"] +) +_ROWFILTER.fields_by_name[ + "cells_per_row_offset_filter" +].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] +_ROWFILTER.oneofs_by_name["filter"].fields.append( + _ROWFILTER.fields_by_name["cells_per_row_limit_filter"] +) +_ROWFILTER.fields_by_name[ + "cells_per_row_limit_filter" +].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] +_ROWFILTER.oneofs_by_name["filter"].fields.append( + _ROWFILTER.fields_by_name["cells_per_column_limit_filter"] +) +_ROWFILTER.fields_by_name[ + "cells_per_column_limit_filter" +].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] +_ROWFILTER.oneofs_by_name["filter"].fields.append( + _ROWFILTER.fields_by_name["strip_value_transformer"] +) +_ROWFILTER.fields_by_name[ + "strip_value_transformer" +].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] +_ROWFILTER.oneofs_by_name["filter"].fields.append( + _ROWFILTER.fields_by_name["apply_label_transformer"] +) +_ROWFILTER.fields_by_name[ + "apply_label_transformer" +].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] +_MUTATION_SETCELL.containing_type = _MUTATION +_MUTATION_DELETEFROMCOLUMN.fields_by_name["time_range"].message_type = _TIMESTAMPRANGE +_MUTATION_DELETEFROMCOLUMN.containing_type = _MUTATION +_MUTATION_DELETEFROMFAMILY.containing_type = _MUTATION 
+_MUTATION_DELETEFROMROW.containing_type = _MUTATION +_MUTATION.fields_by_name["set_cell"].message_type = _MUTATION_SETCELL +_MUTATION.fields_by_name["delete_from_column"].message_type = _MUTATION_DELETEFROMCOLUMN +_MUTATION.fields_by_name["delete_from_family"].message_type = _MUTATION_DELETEFROMFAMILY +_MUTATION.fields_by_name["delete_from_row"].message_type = _MUTATION_DELETEFROMROW +_MUTATION.oneofs_by_name["mutation"].fields.append(_MUTATION.fields_by_name["set_cell"]) +_MUTATION.fields_by_name["set_cell"].containing_oneof = _MUTATION.oneofs_by_name[ + "mutation" +] +_MUTATION.oneofs_by_name["mutation"].fields.append( + _MUTATION.fields_by_name["delete_from_column"] +) +_MUTATION.fields_by_name[ + "delete_from_column" +].containing_oneof = _MUTATION.oneofs_by_name["mutation"] +_MUTATION.oneofs_by_name["mutation"].fields.append( + _MUTATION.fields_by_name["delete_from_family"] +) +_MUTATION.fields_by_name[ + "delete_from_family" +].containing_oneof = _MUTATION.oneofs_by_name["mutation"] +_MUTATION.oneofs_by_name["mutation"].fields.append( + _MUTATION.fields_by_name["delete_from_row"] +) +_MUTATION.fields_by_name["delete_from_row"].containing_oneof = _MUTATION.oneofs_by_name[ + "mutation" +] +_READMODIFYWRITERULE.oneofs_by_name["rule"].fields.append( + _READMODIFYWRITERULE.fields_by_name["append_value"] +) +_READMODIFYWRITERULE.fields_by_name[ + "append_value" +].containing_oneof = _READMODIFYWRITERULE.oneofs_by_name["rule"] +_READMODIFYWRITERULE.oneofs_by_name["rule"].fields.append( + _READMODIFYWRITERULE.fields_by_name["increment_amount"] +) +_READMODIFYWRITERULE.fields_by_name[ + "increment_amount" +].containing_oneof = _READMODIFYWRITERULE.oneofs_by_name["rule"] +DESCRIPTOR.message_types_by_name["Row"] = _ROW +DESCRIPTOR.message_types_by_name["Family"] = _FAMILY +DESCRIPTOR.message_types_by_name["Column"] = _COLUMN +DESCRIPTOR.message_types_by_name["Cell"] = _CELL +DESCRIPTOR.message_types_by_name["RowRange"] = _ROWRANGE 
+DESCRIPTOR.message_types_by_name["RowSet"] = _ROWSET +DESCRIPTOR.message_types_by_name["ColumnRange"] = _COLUMNRANGE +DESCRIPTOR.message_types_by_name["TimestampRange"] = _TIMESTAMPRANGE +DESCRIPTOR.message_types_by_name["ValueRange"] = _VALUERANGE +DESCRIPTOR.message_types_by_name["RowFilter"] = _ROWFILTER +DESCRIPTOR.message_types_by_name["Mutation"] = _MUTATION +DESCRIPTOR.message_types_by_name["ReadModifyWriteRule"] = _READMODIFYWRITERULE +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +Row = _reflection.GeneratedProtocolMessageType( + "Row", + (_message.Message,), + { + "DESCRIPTOR": _ROW, + "__module__": "google.cloud.bigtable_v2.proto.data_pb2", + "__doc__": """Specifies the complete (requested) contents of a single row of a + table. Rows which exceed 256MiB in size cannot be read in full. + + Attributes: + key: + The unique key which identifies this row within its table. + This is the same key that’s used to identify the row in, for + example, a MutateRowRequest. May contain any non-empty byte + string up to 4KiB in length. + families: + May be empty, but only if the entire row is empty. The mutual + ordering of column families is not specified. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.v2.Row) + }, +) +_sym_db.RegisterMessage(Row) + +Family = _reflection.GeneratedProtocolMessageType( + "Family", + (_message.Message,), + { + "DESCRIPTOR": _FAMILY, + "__module__": "google.cloud.bigtable_v2.proto.data_pb2", + "__doc__": """Specifies (some of) the contents of a single row/column family + intersection of a table. + + Attributes: + name: + The unique key which identifies this family within its row. + This is the same key that’s used to identify the family in, + for example, a RowFilter which sets its + “family_name_regex_filter” field. Must match + ``[-_.a-zA-Z0-9]+``, except that AggregatingRowProcessors may + produce cells in a sentinel family with an empty name. Must be + no greater than 64 characters in length. 
+ columns: + Must not be empty. Sorted in order of increasing “qualifier”. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.v2.Family) + }, +) +_sym_db.RegisterMessage(Family) + +Column = _reflection.GeneratedProtocolMessageType( + "Column", + (_message.Message,), + { + "DESCRIPTOR": _COLUMN, + "__module__": "google.cloud.bigtable_v2.proto.data_pb2", + "__doc__": """Specifies (some of) the contents of a single row/column intersection + of a table. + + Attributes: + qualifier: + The unique key which identifies this column within its family. + This is the same key that’s used to identify the column in, + for example, a RowFilter which sets its + ``column_qualifier_regex_filter`` field. May contain any byte + string, including the empty string, up to 16kiB in length. + cells: + Must not be empty. Sorted in order of decreasing + “timestamp_micros”. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.v2.Column) + }, +) +_sym_db.RegisterMessage(Column) + +Cell = _reflection.GeneratedProtocolMessageType( + "Cell", + (_message.Message,), + { + "DESCRIPTOR": _CELL, + "__module__": "google.cloud.bigtable_v2.proto.data_pb2", + "__doc__": """Specifies (some of) the contents of a single row/column/timestamp of a + table. + + Attributes: + timestamp_micros: + The cell’s stored timestamp, which also uniquely identifies it + within its column. Values are always expressed in + microseconds, but individual tables may set a coarser + granularity to further restrict the allowed values. For + example, a table which specifies millisecond granularity will + only allow values of ``timestamp_micros`` which are multiples + of 1000. + value: + The value stored in the cell. May contain any byte string, + including the empty string, up to 100MiB in length. + labels: + Labels applied to the cell by a + [RowFilter][google.bigtable.v2.RowFilter]. 
+ """, + # @@protoc_insertion_point(class_scope:google.bigtable.v2.Cell) + }, +) +_sym_db.RegisterMessage(Cell) + +RowRange = _reflection.GeneratedProtocolMessageType( + "RowRange", + (_message.Message,), + { + "DESCRIPTOR": _ROWRANGE, + "__module__": "google.cloud.bigtable_v2.proto.data_pb2", + "__doc__": """Specifies a contiguous range of rows. + + Attributes: + start_key: + The row key at which to start the range. If neither field is + set, interpreted as the empty string, inclusive. + start_key_closed: + Used when giving an inclusive lower bound for the range. + start_key_open: + Used when giving an exclusive lower bound for the range. + end_key: + The row key at which to end the range. If neither field is + set, interpreted as the infinite row key, exclusive. + end_key_open: + Used when giving an exclusive upper bound for the range. + end_key_closed: + Used when giving an inclusive upper bound for the range. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowRange) + }, +) +_sym_db.RegisterMessage(RowRange) + +RowSet = _reflection.GeneratedProtocolMessageType( + "RowSet", + (_message.Message,), + { + "DESCRIPTOR": _ROWSET, + "__module__": "google.cloud.bigtable_v2.proto.data_pb2", + "__doc__": """Specifies a non-contiguous set of rows. + + Attributes: + row_keys: + Single rows included in the set. + row_ranges: + Contiguous row ranges included in the set. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowSet) + }, +) +_sym_db.RegisterMessage(RowSet) + +ColumnRange = _reflection.GeneratedProtocolMessageType( + "ColumnRange", + (_message.Message,), + { + "DESCRIPTOR": _COLUMNRANGE, + "__module__": "google.cloud.bigtable_v2.proto.data_pb2", + "__doc__": """Specifies a contiguous range of columns within a single column family. + The range spans from : to + :, where both bounds can be either + inclusive or exclusive. + + Attributes: + family_name: + The name of the column family within which this range falls. 
+ start_qualifier: + The column qualifier at which to start the range (within + ``column_family``). If neither field is set, interpreted as + the empty string, inclusive. + start_qualifier_closed: + Used when giving an inclusive lower bound for the range. + start_qualifier_open: + Used when giving an exclusive lower bound for the range. + end_qualifier: + The column qualifier at which to end the range (within + ``column_family``). If neither field is set, interpreted as + the infinite string, exclusive. + end_qualifier_closed: + Used when giving an inclusive upper bound for the range. + end_qualifier_open: + Used when giving an exclusive upper bound for the range. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.v2.ColumnRange) + }, +) +_sym_db.RegisterMessage(ColumnRange) + +TimestampRange = _reflection.GeneratedProtocolMessageType( + "TimestampRange", + (_message.Message,), + { + "DESCRIPTOR": _TIMESTAMPRANGE, + "__module__": "google.cloud.bigtable_v2.proto.data_pb2", + "__doc__": """Specified a contiguous range of microsecond timestamps. + + Attributes: + start_timestamp_micros: + Inclusive lower bound. If left empty, interpreted as 0. + end_timestamp_micros: + Exclusive upper bound. If left empty, interpreted as infinity. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.v2.TimestampRange) + }, +) +_sym_db.RegisterMessage(TimestampRange) + +ValueRange = _reflection.GeneratedProtocolMessageType( + "ValueRange", + (_message.Message,), + { + "DESCRIPTOR": _VALUERANGE, + "__module__": "google.cloud.bigtable_v2.proto.data_pb2", + "__doc__": """Specifies a contiguous range of raw byte values. + + Attributes: + start_value: + The value at which to start the range. If neither field is + set, interpreted as the empty string, inclusive. + start_value_closed: + Used when giving an inclusive lower bound for the range. + start_value_open: + Used when giving an exclusive lower bound for the range. 
+ end_value: + The value at which to end the range. If neither field is set, + interpreted as the infinite string, exclusive. + end_value_closed: + Used when giving an inclusive upper bound for the range. + end_value_open: + Used when giving an exclusive upper bound for the range. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.v2.ValueRange) + }, +) +_sym_db.RegisterMessage(ValueRange) + +RowFilter = _reflection.GeneratedProtocolMessageType( + "RowFilter", + (_message.Message,), + { + "Chain": _reflection.GeneratedProtocolMessageType( + "Chain", + (_message.Message,), + { + "DESCRIPTOR": _ROWFILTER_CHAIN, + "__module__": "google.cloud.bigtable_v2.proto.data_pb2", + "__doc__": """A RowFilter which sends rows through several RowFilters in sequence. + + Attributes: + filters: + The elements of “filters” are chained together to process the + input row: in row -> f(0) -> intermediate row -> f(1) -> … -> + f(N) -> out row The full chain is executed atomically. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter.Chain) + }, + ), + "Interleave": _reflection.GeneratedProtocolMessageType( + "Interleave", + (_message.Message,), + { + "DESCRIPTOR": _ROWFILTER_INTERLEAVE, + "__module__": "google.cloud.bigtable_v2.proto.data_pb2", + "__doc__": """A RowFilter which sends each row to each of several component + RowFilters and interleaves the results. + + Attributes: + filters: + The elements of “filters” all process a copy of the input row, + and the results are pooled, sorted, and combined into a single + output row. If multiple cells are produced with the same + column and timestamp, they will all appear in the output row + in an unspecified mutual order. 
Consider the following + example, with three filters: :: + input row | + ----------------------------------------------------- + | | | + f(0) f(1) f(2) + | | | 1: + foo,bar,10,x foo,bar,10,z far,bar,7,a + 2: foo,blah,11,z far,blah,5,x + far,blah,5,x | | + | + ----------------------------------------------------- + | 1: foo,bar,10,z // could have + switched with #2 2: foo,bar,10,x // + could have switched with #1 3: + foo,blah,11,z 4: far,bar,7,a 5: + far,blah,5,x // identical to #6 6: + far,blah,5,x // identical to #5 All interleaved filters are + executed atomically. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter.Interleave) + }, + ), + "Condition": _reflection.GeneratedProtocolMessageType( + "Condition", + (_message.Message,), + { + "DESCRIPTOR": _ROWFILTER_CONDITION, + "__module__": "google.cloud.bigtable_v2.proto.data_pb2", + "__doc__": """A RowFilter which evaluates one of two possible RowFilters, depending + on whether or not a predicate RowFilter outputs any cells from the + input row. IMPORTANT NOTE: The predicate filter does not execute + atomically with the true and false filters, which may lead to + inconsistent or unexpected results. Additionally, Condition filters + have poor performance, especially when filters are set for the false + condition. + + Attributes: + predicate_filter: + If ``predicate_filter`` outputs any cells, then + ``true_filter`` will be evaluated on the input row. Otherwise, + ``false_filter`` will be evaluated. + true_filter: + The filter to apply to the input row if ``predicate_filter`` + returns any results. If not provided, no results will be + returned in the true case. + false_filter: + The filter to apply to the input row if ``predicate_filter`` + does not return any results. If not provided, no results will + be returned in the false case. 
+ """, + # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter.Condition) + }, + ), + "DESCRIPTOR": _ROWFILTER, + "__module__": "google.cloud.bigtable_v2.proto.data_pb2", + "__doc__": """Takes a row as input and produces an alternate view of the row based + on specified rules. For example, a RowFilter might trim down a row to + include just the cells from columns matching a given regular + expression, or might return all the cells of a row but not their + values. More complicated filters can be composed out of these + components to express requests such as, “within every column of a + particular family, give just the two most recent cells which are older + than timestamp X.” There are two broad categories of RowFilters (true + filters and transformers), as well as two ways to compose simple + filters into more complex ones (chains and interleaves). They work as + follows: - True filters alter the input row by excluding some of its + cells wholesale from the output row. An example of a true filter is + the ``value_regex_filter``, which excludes cells whose values don’t + match the specified pattern. All regex true filters use RE2 syntax + (https://github.com/google/re2/wiki/Syntax) in raw byte mode + (RE2::Latin1), and are evaluated as full matches. An important point + to keep in mind is that ``RE2(.)`` is equivalent by default to + ``RE2([^\n])``, meaning that it does not match newlines. When + attempting to match an arbitrary byte, you should therefore use the + escape sequence ``\C``, which may need to be further escaped as + ``\\C`` in your client language. - Transformers alter the input row + by changing the values of some of its cells in the output, without + excluding them completely. Currently, the only supported + transformer is the ``strip_value_transformer``, which replaces + every cell’s value with the empty string. - Chains and + interleaves are described in more detail in the RowFilter.Chain and + RowFilter.Interleave documentation. 
The total serialized size of a + RowFilter message must not exceed 4096 bytes, and RowFilters may not + be nested within each other (in Chains or Interleaves) to a depth of + more than 20. + + Attributes: + filter: + Which of the possible RowFilter types to apply. If none are + set, this RowFilter returns all cells in the input row. + chain: + Applies several RowFilters to the data in sequence, + progressively narrowing the results. + interleave: + Applies several RowFilters to the data in parallel and + combines the results. + condition: + Applies one of two possible RowFilters to the data based on + the output of a predicate RowFilter. + sink: + ADVANCED USE ONLY. Hook for introspection into the RowFilter. + Outputs all cells directly to the output of the read rather + than to any parent filter. Consider the following example: :: + Chain( FamilyRegex("A"), Interleave( All(), + Chain(Label("foo"), Sink()) ), QualifierRegex("B") + ) A,A,1,w + A,B,2,x B,B,4,z + | FamilyRegex("A") + | A,A,1,w + A,B,2,x | + +------------+-------------+ | + | All() Label(foo) + | | A,A,1,w + A,A,1,w,labels:[foo] A,B,2,x + A,B,2,x,labels:[foo] | | + | Sink() --------------+ | + | | +------------+ x------+ + A,A,1,w,labels:[foo] | + A,B,2,x,labels:[foo] A,A,1,w + | A,B,2,x | + | | + QualifierRegex("B") | + | | + A,B,2,x | + | | + +--------------------------------+ | + A,A,1,w,labels:[foo] + A,B,2,x,labels:[foo] // could be switched + A,B,2,x // could be switched Despite being + excluded by the qualifier filter, a copy of every cell that + reaches the sink is present in the final result. As with an + [Interleave][google.bigtable.v2.RowFilter.Interleave], + duplicate cells are possible, and appear in an unspecified + mutual order. In this case we have a duplicate with column + “A:B” and timestamp 2, because one copy passed through the all + filter while the other was passed through the label and sink. + Note that one copy has label “foo”, while the other does not. 
+ Cannot be used within the ``predicate_filter``, + ``true_filter``, or ``false_filter`` of a + [Condition][google.bigtable.v2.RowFilter.Condition]. + pass_all_filter: + Matches all cells, regardless of input. Functionally + equivalent to leaving ``filter`` unset, but included for + completeness. + block_all_filter: + Does not match any cells, regardless of input. Useful for + temporarily disabling just part of a filter. + row_key_regex_filter: + Matches only cells from rows whose keys satisfy the given RE2 + regex. In other words, passes through the entire row when the + key matches, and otherwise produces an empty row. Note that, + since row keys can contain arbitrary bytes, the ``\C`` escape + sequence must be used if a true wildcard is desired. The ``.`` + character will not match the new line character ``\n``, which + may be present in a binary key. + row_sample_filter: + Matches all cells from a row with probability p, and matches + no cells from the row with probability 1-p. + family_name_regex_filter: + Matches only cells from columns whose families satisfy the + given RE2 regex. For technical reasons, the regex must not + contain the ``:`` character, even if it is not being used as a + literal. Note that, since column families cannot contain the + new line character ``\n``, it is sufficient to use ``.`` as a + full wildcard when matching column family names. + column_qualifier_regex_filter: + Matches only cells from columns whose qualifiers satisfy the + given RE2 regex. Note that, since column qualifiers can + contain arbitrary bytes, the ``\C`` escape sequence must be + used if a true wildcard is desired. The ``.`` character will + not match the new line character ``\n``, which may be present + in a binary qualifier. + column_range_filter: + Matches only cells from columns within the given range. + timestamp_range_filter: + Matches only cells with timestamps within the given range. 
+ value_regex_filter: + Matches only cells with values that satisfy the given regular + expression. Note that, since cell values can contain arbitrary + bytes, the ``\C`` escape sequence must be used if a true + wildcard is desired. The ``.`` character will not match the + new line character ``\n``, which may be present in a binary + value. + value_range_filter: + Matches only cells with values that fall within the given + range. + cells_per_row_offset_filter: + Skips the first N cells of each row, matching all subsequent + cells. If duplicate cells are present, as is possible when + using an Interleave, each copy of the cell is counted + separately. + cells_per_row_limit_filter: + Matches only the first N cells of each row. If duplicate cells + are present, as is possible when using an Interleave, each + copy of the cell is counted separately. + cells_per_column_limit_filter: + Matches only the most recent N cells within each column. For + example, if N=2, this filter would match column ``foo:bar`` at + timestamps 10 and 9, skip all earlier cells in ``foo:bar``, + and then begin matching again in column ``foo:bar2``. If + duplicate cells are present, as is possible when using an + Interleave, each copy of the cell is counted separately. + strip_value_transformer: + Replaces each cell’s value with the empty string. + apply_label_transformer: + Applies the given label to all cells in the output row. This + allows the client to determine which results were produced + from which part of the filter. Values must be at most 15 + characters in length, and match the RE2 pattern + ``[a-z0-9\\-]+`` Due to a technical limitation, it is not + currently possible to apply multiple labels to a cell. As a + result, a Chain may have no more than one sub-filter which + contains a ``apply_label_transformer``. It is okay for an + Interleave to contain multiple ``apply_label_transformers``, + as they will be applied to separate copies of the input. This + may be relaxed in the future. 
+ """, + # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter) + }, +) +_sym_db.RegisterMessage(RowFilter) +_sym_db.RegisterMessage(RowFilter.Chain) +_sym_db.RegisterMessage(RowFilter.Interleave) +_sym_db.RegisterMessage(RowFilter.Condition) + +Mutation = _reflection.GeneratedProtocolMessageType( + "Mutation", + (_message.Message,), + { + "SetCell": _reflection.GeneratedProtocolMessageType( + "SetCell", + (_message.Message,), + { + "DESCRIPTOR": _MUTATION_SETCELL, + "__module__": "google.cloud.bigtable_v2.proto.data_pb2", + "__doc__": """A Mutation which sets the value of the specified cell. + + Attributes: + family_name: + The name of the family into which new data should be written. + Must match ``[-_.a-zA-Z0-9]+`` + column_qualifier: + The qualifier of the column into which new data should be + written. Can be any byte string, including the empty string. + timestamp_micros: + The timestamp of the cell into which new data should be + written. Use -1 for current Bigtable server time. Otherwise, + the client should set this value itself, noting that the + default value is a timestamp of zero if the field is left + unspecified. Values must match the granularity of the table + (e.g. micros, millis). + value: + The value to be written into the specified cell. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.SetCell) + }, + ), + "DeleteFromColumn": _reflection.GeneratedProtocolMessageType( + "DeleteFromColumn", + (_message.Message,), + { + "DESCRIPTOR": _MUTATION_DELETEFROMCOLUMN, + "__module__": "google.cloud.bigtable_v2.proto.data_pb2", + "__doc__": """A Mutation which deletes cells from the specified column, optionally + restricting the deletions to a given timestamp range. + + Attributes: + family_name: + The name of the family from which cells should be deleted. + Must match ``[-_.a-zA-Z0-9]+`` + column_qualifier: + The qualifier of the column from which cells should be + deleted. 
Can be any byte string, including the empty string. + time_range: + The range of timestamps within which cells should be deleted. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.DeleteFromColumn) + }, + ), + "DeleteFromFamily": _reflection.GeneratedProtocolMessageType( + "DeleteFromFamily", + (_message.Message,), + { + "DESCRIPTOR": _MUTATION_DELETEFROMFAMILY, + "__module__": "google.cloud.bigtable_v2.proto.data_pb2", + "__doc__": """A Mutation which deletes all cells from the specified column family. + + Attributes: + family_name: + The name of the family from which cells should be deleted. + Must match ``[-_.a-zA-Z0-9]+`` + """, + # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.DeleteFromFamily) + }, + ), + "DeleteFromRow": _reflection.GeneratedProtocolMessageType( + "DeleteFromRow", + (_message.Message,), + { + "DESCRIPTOR": _MUTATION_DELETEFROMROW, + "__module__": "google.cloud.bigtable_v2.proto.data_pb2", + "__doc__": """A Mutation which deletes all cells from the containing row.""", + # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.DeleteFromRow) + }, + ), + "DESCRIPTOR": _MUTATION, + "__module__": "google.cloud.bigtable_v2.proto.data_pb2", + "__doc__": """Specifies a particular change to be made to the contents of a row. + + Attributes: + mutation: + Which of the possible Mutation types to apply. + set_cell: + Set a cell’s value. + delete_from_column: + Deletes cells from a column. + delete_from_family: + Deletes cells from a column family. + delete_from_row: + Deletes cells from the entire row. 
+ """, + # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation) + }, +) +_sym_db.RegisterMessage(Mutation) +_sym_db.RegisterMessage(Mutation.SetCell) +_sym_db.RegisterMessage(Mutation.DeleteFromColumn) +_sym_db.RegisterMessage(Mutation.DeleteFromFamily) +_sym_db.RegisterMessage(Mutation.DeleteFromRow) + +ReadModifyWriteRule = _reflection.GeneratedProtocolMessageType( + "ReadModifyWriteRule", + (_message.Message,), + { + "DESCRIPTOR": _READMODIFYWRITERULE, + "__module__": "google.cloud.bigtable_v2.proto.data_pb2", + "__doc__": """Specifies an atomic read/modify/write operation on the latest value of + the specified column. + + Attributes: + family_name: + The name of the family to which the read/modify/write should + be applied. Must match ``[-_.a-zA-Z0-9]+`` + column_qualifier: + The qualifier of the column to which the read/modify/write + should be applied. Can be any byte string, including the empty + string. + rule: + The rule used to determine the column’s new latest value from + its current latest value. + append_value: + Rule specifying that ``append_value`` be appended to the + existing value. If the targeted cell is unset, it will be + treated as containing the empty string. + increment_amount: + Rule specifying that ``increment_amount`` be added to the + existing value. If the targeted cell is unset, it will be + treated as containing a zero. Otherwise, the targeted cell + must contain an 8-byte value (interpreted as a 64-bit big- + endian signed integer), or the entire request will fail. 
+ """, + # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadModifyWriteRule) + }, +) +_sym_db.RegisterMessage(ReadModifyWriteRule) + + +DESCRIPTOR._options = None +# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/bigtable_v2/proto/data_pb2_grpc.py b/google/cloud/bigtable_v2/proto/data_pb2_grpc.py new file mode 100644 index 000000000..8a9393943 --- /dev/null +++ b/google/cloud/bigtable_v2/proto/data_pb2_grpc.py @@ -0,0 +1,3 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc diff --git a/google/cloud/bigtable_v2/types.py b/google/cloud/bigtable_v2/types.py new file mode 100644 index 000000000..607e1b09c --- /dev/null +++ b/google/cloud/bigtable_v2/types.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +from __future__ import absolute_import +import sys + +from google.api_core.protobuf_helpers import get_messages + +from google.cloud.bigtable_v2.proto import bigtable_pb2 +from google.cloud.bigtable_v2.proto import data_pb2 +from google.protobuf import any_pb2 +from google.protobuf import wrappers_pb2 +from google.rpc import status_pb2 + + +_shared_modules = [ + any_pb2, + wrappers_pb2, + status_pb2, +] + +_local_modules = [ + bigtable_pb2, + data_pb2, +] + +names = [] + +for module in _shared_modules: # pragma: NO COVER + for name, message in get_messages(module).items(): + setattr(sys.modules[__name__], name, message) + names.append(name) +for module in _local_modules: + for name, message in get_messages(module).items(): + message.__module__ = "google.cloud.bigtable_v2.types" + setattr(sys.modules[__name__], name, message) + names.append(name) + + +__all__ = tuple(sorted(names)) diff --git a/noxfile.py b/noxfile.py index 70d9c13c2..9e90799f8 100644 --- a/noxfile.py +++ b/noxfile.py @@ -154,7 +154,7 @@ def cover(session): test runs (not system test runs), and then erases coverage data. 
""" session.install("coverage", "pytest-cov") - session.run("coverage", "report", "--show-missing", "--fail-under=99") + session.run("coverage", "report", "--show-missing", "--fail-under=100") session.run("coverage", "erase") diff --git a/synth.metadata b/synth.metadata index 4416e5d4e..59b837240 100644 --- a/synth.metadata +++ b/synth.metadata @@ -4,7 +4,7 @@ "git": { "name": ".", "remote": "https://github.com/googleapis/python-bigtable.git", - "sha": "6fe87016a159bbdc6bf29856b1cf6e633e16216a" + "sha": "76c4e9abd59ffac5fb2d8fb2298d04b9e1128d8c" } }, { @@ -51,7 +51,6 @@ } ], "generatedFiles": [ - ".coveragerc", ".flake8", ".github/CONTRIBUTING.md", ".github/ISSUE_TEMPLATE/bug_report.md", @@ -147,6 +146,7 @@ "google/cloud/bigtable_v2/proto/data_pb2.py", "google/cloud/bigtable_v2/proto/data_pb2_grpc.py", "google/cloud/bigtable_v2/types.py", + "noxfile.py", "renovate.json", "samples/AUTHORING_GUIDE.md", "samples/CONTRIBUTING.md", diff --git a/tests/unit/gapic/v2/test_bigtable_client_v2.py b/tests/unit/gapic/v2/test_bigtable_client_v2.py new file mode 100644 index 000000000..84abfecef --- /dev/null +++ b/tests/unit/gapic/v2/test_bigtable_client_v2.py @@ -0,0 +1,316 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Unit tests.""" + +import mock +import pytest + +from google.cloud import bigtable_v2 +from google.cloud.bigtable_v2.proto import bigtable_pb2 + + +class MultiCallableStub(object): + """Stub for the grpc.UnaryUnaryMultiCallable interface.""" + + def __init__(self, method, channel_stub): + self.method = method + self.channel_stub = channel_stub + + def __call__(self, request, timeout=None, metadata=None, credentials=None): + self.channel_stub.requests.append((self.method, request)) + + response = None + if self.channel_stub.responses: + response = self.channel_stub.responses.pop() + + if isinstance(response, Exception): + raise response + + if response: + return response + + +class ChannelStub(object): + """Stub for the grpc.Channel interface.""" + + def __init__(self, responses=[]): + self.responses = responses + self.requests = [] + + def unary_unary(self, method, request_serializer=None, response_deserializer=None): + return MultiCallableStub(method, self) + + def unary_stream(self, method, request_serializer=None, response_deserializer=None): + return MultiCallableStub(method, self) + + +class CustomException(Exception): + pass + + +class TestBigtableClient(object): + def test_read_rows(self): + # Setup Expected Response + last_scanned_row_key = b"-126" + expected_response = {"last_scanned_row_key": last_scanned_row_key} + expected_response = bigtable_pb2.ReadRowsResponse(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[iter([expected_response])]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_v2.BigtableClient() + + # Setup Request + table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + + response = client.read_rows(table_name) + resources = list(response) + assert len(resources) == 1 + assert expected_response == resources[0] + + assert len(channel.requests) == 1 + expected_request = 
bigtable_pb2.ReadRowsRequest(table_name=table_name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_read_rows_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_v2.BigtableClient() + + # Setup request + table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + + with pytest.raises(CustomException): + client.read_rows(table_name) + + def test_sample_row_keys(self): + # Setup Expected Response + row_key = b"122" + offset_bytes = 889884095 + expected_response = {"row_key": row_key, "offset_bytes": offset_bytes} + expected_response = bigtable_pb2.SampleRowKeysResponse(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[iter([expected_response])]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_v2.BigtableClient() + + # Setup Request + table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + + response = client.sample_row_keys(table_name) + resources = list(response) + assert len(resources) == 1 + assert expected_response == resources[0] + + assert len(channel.requests) == 1 + expected_request = bigtable_pb2.SampleRowKeysRequest(table_name=table_name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_sample_row_keys_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_v2.BigtableClient() + + # Setup request + table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + + with 
pytest.raises(CustomException): + client.sample_row_keys(table_name) + + def test_mutate_row(self): + # Setup Expected Response + expected_response = {} + expected_response = bigtable_pb2.MutateRowResponse(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_v2.BigtableClient() + + # Setup Request + table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + row_key = b"122" + mutations = [] + + response = client.mutate_row(table_name, row_key, mutations) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_pb2.MutateRowRequest( + table_name=table_name, row_key=row_key, mutations=mutations + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_mutate_row_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_v2.BigtableClient() + + # Setup request + table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + row_key = b"122" + mutations = [] + + with pytest.raises(CustomException): + client.mutate_row(table_name, row_key, mutations) + + def test_mutate_rows(self): + # Setup Expected Response + expected_response = {} + expected_response = bigtable_pb2.MutateRowsResponse(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[iter([expected_response])]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_v2.BigtableClient() + + # Setup Request + table_name = client.table_path("[PROJECT]", "[INSTANCE]", 
"[TABLE]") + entries = [] + + response = client.mutate_rows(table_name, entries) + resources = list(response) + assert len(resources) == 1 + assert expected_response == resources[0] + + assert len(channel.requests) == 1 + expected_request = bigtable_pb2.MutateRowsRequest( + table_name=table_name, entries=entries + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_mutate_rows_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_v2.BigtableClient() + + # Setup request + table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + entries = [] + + with pytest.raises(CustomException): + client.mutate_rows(table_name, entries) + + def test_check_and_mutate_row(self): + # Setup Expected Response + predicate_matched = True + expected_response = {"predicate_matched": predicate_matched} + expected_response = bigtable_pb2.CheckAndMutateRowResponse(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_v2.BigtableClient() + + # Setup Request + table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + row_key = b"122" + + response = client.check_and_mutate_row(table_name, row_key) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_pb2.CheckAndMutateRowRequest( + table_name=table_name, row_key=row_key + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_check_and_mutate_row_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = 
mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_v2.BigtableClient() + + # Setup request + table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + row_key = b"122" + + with pytest.raises(CustomException): + client.check_and_mutate_row(table_name, row_key) + + def test_read_modify_write_row(self): + # Setup Expected Response + expected_response = {} + expected_response = bigtable_pb2.ReadModifyWriteRowResponse(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_v2.BigtableClient() + + # Setup Request + table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + row_key = b"122" + rules = [] + + response = client.read_modify_write_row(table_name, row_key, rules) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_pb2.ReadModifyWriteRowRequest( + table_name=table_name, row_key=row_key, rules=rules + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_read_modify_write_row_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_v2.BigtableClient() + + # Setup request + table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + row_key = b"122" + rules = [] + + with pytest.raises(CustomException): + client.read_modify_write_row(table_name, row_key, rules) diff --git a/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py b/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py new file mode 100644 index 
000000000..df083406b --- /dev/null +++ b/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py @@ -0,0 +1,924 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Unit tests.""" + +import mock +import pytest + +from google.rpc import status_pb2 + +from google.cloud import bigtable_admin_v2 +from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2 +from google.cloud.bigtable_admin_v2.proto import instance_pb2 +from google.iam.v1 import iam_policy_pb2 +from google.iam.v1 import policy_pb2 +from google.longrunning import operations_pb2 +from google.protobuf import empty_pb2 +from google.protobuf import field_mask_pb2 + + +class MultiCallableStub(object): + """Stub for the grpc.UnaryUnaryMultiCallable interface.""" + + def __init__(self, method, channel_stub): + self.method = method + self.channel_stub = channel_stub + + def __call__(self, request, timeout=None, metadata=None, credentials=None): + self.channel_stub.requests.append((self.method, request)) + + response = None + if self.channel_stub.responses: + response = self.channel_stub.responses.pop() + + if isinstance(response, Exception): + raise response + + if response: + return response + + +class ChannelStub(object): + """Stub for the grpc.Channel interface.""" + + def __init__(self, responses=[]): + self.responses = responses + self.requests = [] + + def unary_unary(self, method, request_serializer=None, 
response_deserializer=None): + return MultiCallableStub(method, self) + + +class CustomException(Exception): + pass + + +class TestBigtableInstanceAdminClient(object): + def test_create_instance(self): + # Setup Expected Response + name = "name3373707" + display_name = "displayName1615086568" + expected_response = {"name": name, "display_name": display_name} + expected_response = instance_pb2.Instance(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_create_instance", done=True + ) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + parent = client.project_path("[PROJECT]") + instance_id = "instanceId-2101995259" + instance = {} + clusters = {} + + response = client.create_instance(parent, instance_id, instance, clusters) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.CreateInstanceRequest( + parent=parent, instance_id=instance_id, instance=instance, clusters=clusters + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_create_instance_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_create_instance_exception", done=True + ) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + parent = client.project_path("[PROJECT]") + instance_id = 
"instanceId-2101995259" + instance = {} + clusters = {} + + response = client.create_instance(parent, instance_id, instance, clusters) + exception = response.exception() + assert exception.errors[0] == error + + def test_get_instance(self): + # Setup Expected Response + name_2 = "name2-1052831874" + display_name = "displayName1615086568" + expected_response = {"name": name_2, "display_name": display_name} + expected_response = instance_pb2.Instance(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + name = client.instance_path("[PROJECT]", "[INSTANCE]") + + response = client.get_instance(name) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.GetInstanceRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_get_instance_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup request + name = client.instance_path("[PROJECT]", "[INSTANCE]") + + with pytest.raises(CustomException): + client.get_instance(name) + + def test_list_instances(self): + # Setup Expected Response + next_page_token = "nextPageToken-1530815211" + expected_response = {"next_page_token": next_page_token} + expected_response = bigtable_instance_admin_pb2.ListInstancesResponse( + **expected_response + ) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = 
mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + parent = client.project_path("[PROJECT]") + + response = client.list_instances(parent) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.ListInstancesRequest( + parent=parent + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_list_instances_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup request + parent = client.project_path("[PROJECT]") + + with pytest.raises(CustomException): + client.list_instances(parent) + + def test_update_instance(self): + # Setup Expected Response + name = "name3373707" + display_name_2 = "displayName21615000987" + expected_response = {"name": name, "display_name": display_name_2} + expected_response = instance_pb2.Instance(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + display_name = "displayName1615086568" + + response = client.update_instance(display_name) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = instance_pb2.Instance(display_name=display_name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_update_instance_exception(self): + # Mock the API response + channel = 
ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup request + display_name = "displayName1615086568" + + with pytest.raises(CustomException): + client.update_instance(display_name) + + def test_partial_update_instance(self): + # Setup Expected Response + name = "name3373707" + display_name = "displayName1615086568" + expected_response = {"name": name, "display_name": display_name} + expected_response = instance_pb2.Instance(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_partial_update_instance", done=True + ) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + instance = {} + update_mask = {} + + response = client.partial_update_instance(instance, update_mask) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.PartialUpdateInstanceRequest( + instance=instance, update_mask=update_mask + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_partial_update_instance_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_partial_update_instance_exception", done=True + ) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = 
bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + instance = {} + update_mask = {} + + response = client.partial_update_instance(instance, update_mask) + exception = response.exception() + assert exception.errors[0] == error + + def test_delete_instance(self): + channel = ChannelStub() + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + name = client.instance_path("[PROJECT]", "[INSTANCE]") + + client.delete_instance(name) + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.DeleteInstanceRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_delete_instance_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup request + name = client.instance_path("[PROJECT]", "[INSTANCE]") + + with pytest.raises(CustomException): + client.delete_instance(name) + + def test_create_cluster(self): + # Setup Expected Response + name = "name3373707" + location = "location1901043637" + serve_nodes = 1288838783 + expected_response = { + "name": name, + "location": location, + "serve_nodes": serve_nodes, + } + expected_response = instance_pb2.Cluster(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_create_cluster", done=True + ) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = 
bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + cluster_id = "clusterId240280960" + cluster = {} + + response = client.create_cluster(parent, cluster_id, cluster) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.CreateClusterRequest( + parent=parent, cluster_id=cluster_id, cluster=cluster + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_create_cluster_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_create_cluster_exception", done=True + ) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + cluster_id = "clusterId240280960" + cluster = {} + + response = client.create_cluster(parent, cluster_id, cluster) + exception = response.exception() + assert exception.errors[0] == error + + def test_get_cluster(self): + # Setup Expected Response + name_2 = "name2-1052831874" + location = "location1901043637" + serve_nodes = 1288838783 + expected_response = { + "name": name_2, + "location": location, + "serve_nodes": serve_nodes, + } + expected_response = instance_pb2.Cluster(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + name = client.cluster_path("[PROJECT]", 
"[INSTANCE]", "[CLUSTER]") + + response = client.get_cluster(name) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.GetClusterRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_get_cluster_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup request + name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") + + with pytest.raises(CustomException): + client.get_cluster(name) + + def test_list_clusters(self): + # Setup Expected Response + next_page_token = "nextPageToken-1530815211" + expected_response = {"next_page_token": next_page_token} + expected_response = bigtable_instance_admin_pb2.ListClustersResponse( + **expected_response + ) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + + response = client.list_clusters(parent) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.ListClustersRequest( + parent=parent + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_list_clusters_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = 
bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + + with pytest.raises(CustomException): + client.list_clusters(parent) + + def test_update_cluster(self): + # Setup Expected Response + name = "name3373707" + location = "location1901043637" + serve_nodes_2 = 1623486220 + expected_response = { + "name": name, + "location": location, + "serve_nodes": serve_nodes_2, + } + expected_response = instance_pb2.Cluster(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_update_cluster", done=True + ) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + serve_nodes = 1288838783 + + response = client.update_cluster(serve_nodes) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = instance_pb2.Cluster(serve_nodes=serve_nodes) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_update_cluster_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_update_cluster_exception", done=True + ) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + serve_nodes = 1288838783 + + response = client.update_cluster(serve_nodes) + exception = response.exception() + assert exception.errors[0] == error + + def test_delete_cluster(self): + 
channel = ChannelStub() + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") + + client.delete_cluster(name) + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.DeleteClusterRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_delete_cluster_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup request + name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") + + with pytest.raises(CustomException): + client.delete_cluster(name) + + def test_create_app_profile(self): + # Setup Expected Response + name = "name3373707" + etag = "etag3123477" + description = "description-1724546052" + expected_response = {"name": name, "etag": etag, "description": description} + expected_response = instance_pb2.AppProfile(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + app_profile_id = "appProfileId1262094415" + app_profile = {} + + response = client.create_app_profile(parent, app_profile_id, app_profile) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.CreateAppProfileRequest( + 
parent=parent, app_profile_id=app_profile_id, app_profile=app_profile + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_create_app_profile_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + app_profile_id = "appProfileId1262094415" + app_profile = {} + + with pytest.raises(CustomException): + client.create_app_profile(parent, app_profile_id, app_profile) + + def test_get_app_profile(self): + # Setup Expected Response + name_2 = "name2-1052831874" + etag = "etag3123477" + description = "description-1724546052" + expected_response = {"name": name_2, "etag": etag, "description": description} + expected_response = instance_pb2.AppProfile(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + name = client.app_profile_path("[PROJECT]", "[INSTANCE]", "[APP_PROFILE]") + + response = client.get_app_profile(name) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.GetAppProfileRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_get_app_profile_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = 
bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup request + name = client.app_profile_path("[PROJECT]", "[INSTANCE]", "[APP_PROFILE]") + + with pytest.raises(CustomException): + client.get_app_profile(name) + + def test_list_app_profiles(self): + # Setup Expected Response + next_page_token = "" + app_profiles_element = {} + app_profiles = [app_profiles_element] + expected_response = { + "next_page_token": next_page_token, + "app_profiles": app_profiles, + } + expected_response = bigtable_instance_admin_pb2.ListAppProfilesResponse( + **expected_response + ) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + + paged_list_response = client.list_app_profiles(parent) + resources = list(paged_list_response) + assert len(resources) == 1 + + assert expected_response.app_profiles[0] == resources[0] + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.ListAppProfilesRequest( + parent=parent + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_list_app_profiles_exception(self): + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + + paged_list_response = client.list_app_profiles(parent) + with pytest.raises(CustomException): + list(paged_list_response) + + def test_update_app_profile(self): + # Setup Expected Response + name = "name3373707" + etag = "etag3123477" + description = 
"description-1724546052" + expected_response = {"name": name, "etag": etag, "description": description} + expected_response = instance_pb2.AppProfile(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_update_app_profile", done=True + ) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + app_profile = {} + update_mask = {} + + response = client.update_app_profile(app_profile, update_mask) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.UpdateAppProfileRequest( + app_profile=app_profile, update_mask=update_mask + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_update_app_profile_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_update_app_profile_exception", done=True + ) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + app_profile = {} + update_mask = {} + + response = client.update_app_profile(app_profile, update_mask) + exception = response.exception() + assert exception.errors[0] == error + + def test_delete_app_profile(self): + channel = ChannelStub() + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = 
bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + name = client.app_profile_path("[PROJECT]", "[INSTANCE]", "[APP_PROFILE]") + + client.delete_app_profile(name) + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.DeleteAppProfileRequest( + name=name + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_delete_app_profile_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup request + name = client.app_profile_path("[PROJECT]", "[INSTANCE]", "[APP_PROFILE]") + + with pytest.raises(CustomException): + client.delete_app_profile(name) + + def test_get_iam_policy(self): + # Setup Expected Response + version = 351608024 + etag = b"etag3123477" + expected_response = {"version": version, "etag": etag} + expected_response = policy_pb2.Policy(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + resource = "resource-341064690" + + response = client.get_iam_policy(resource) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = iam_policy_pb2.GetIamPolicyRequest(resource=resource) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_get_iam_policy_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value 
= channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup request + resource = "resource-341064690" + + with pytest.raises(CustomException): + client.get_iam_policy(resource) + + def test_set_iam_policy(self): + # Setup Expected Response + version = 351608024 + etag = b"etag3123477" + expected_response = {"version": version, "etag": etag} + expected_response = policy_pb2.Policy(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + resource = "resource-341064690" + policy = {} + + response = client.set_iam_policy(resource, policy) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = iam_policy_pb2.SetIamPolicyRequest( + resource=resource, policy=policy + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_set_iam_policy_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup request + resource = "resource-341064690" + policy = {} + + with pytest.raises(CustomException): + client.set_iam_policy(resource, policy) + + def test_test_iam_permissions(self): + # Setup Expected Response + expected_response = {} + expected_response = iam_policy_pb2.TestIamPermissionsResponse( + **expected_response + ) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = 
bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + resource = "resource-341064690" + permissions = [] + + response = client.test_iam_permissions(resource, permissions) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = iam_policy_pb2.TestIamPermissionsRequest( + resource=resource, permissions=permissions + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_test_iam_permissions_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup request + resource = "resource-341064690" + permissions = [] + + with pytest.raises(CustomException): + client.test_iam_permissions(resource, permissions) diff --git a/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py b/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py new file mode 100644 index 000000000..42db08579 --- /dev/null +++ b/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py @@ -0,0 +1,1039 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Unit tests.""" + +import mock +import pytest + +from google.rpc import status_pb2 + +from google.cloud import bigtable_admin_v2 +from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2 +from google.cloud.bigtable_admin_v2.proto import table_pb2 +from google.iam.v1 import iam_policy_pb2 +from google.iam.v1 import policy_pb2 +from google.longrunning import operations_pb2 +from google.protobuf import empty_pb2 +from google.protobuf import field_mask_pb2 + + +class MultiCallableStub(object): + """Stub for the grpc.UnaryUnaryMultiCallable interface.""" + + def __init__(self, method, channel_stub): + self.method = method + self.channel_stub = channel_stub + + def __call__(self, request, timeout=None, metadata=None, credentials=None): + self.channel_stub.requests.append((self.method, request)) + + response = None + if self.channel_stub.responses: + response = self.channel_stub.responses.pop() + + if isinstance(response, Exception): + raise response + + if response: + return response + + +class ChannelStub(object): + """Stub for the grpc.Channel interface.""" + + def __init__(self, responses=[]): + self.responses = responses + self.requests = [] + + def unary_unary(self, method, request_serializer=None, response_deserializer=None): + return MultiCallableStub(method, self) + + +class CustomException(Exception): + pass + + +class TestBigtableTableAdminClient(object): + def test_create_table(self): + # Setup Expected Response + name = "name3373707" + expected_response = {"name": name} + expected_response = table_pb2.Table(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + table_id = "tableId-895419604" + table = {} + + response 
= client.create_table(parent, table_id, table) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.CreateTableRequest( + parent=parent, table_id=table_id, table=table + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_create_table_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + table_id = "tableId-895419604" + table = {} + + with pytest.raises(CustomException): + client.create_table(parent, table_id, table) + + def test_create_table_from_snapshot(self): + # Setup Expected Response + name = "name3373707" + expected_response = {"name": name} + expected_response = table_pb2.Table(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_create_table_from_snapshot", done=True + ) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + table_id = "tableId-895419604" + source_snapshot = client.snapshot_path( + "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" + ) + + response = client.create_table_from_snapshot(parent, table_id, source_snapshot) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.CreateTableFromSnapshotRequest( + parent=parent, table_id=table_id, 
source_snapshot=source_snapshot + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_create_table_from_snapshot_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_create_table_from_snapshot_exception", done=True + ) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + table_id = "tableId-895419604" + source_snapshot = client.snapshot_path( + "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" + ) + + response = client.create_table_from_snapshot(parent, table_id, source_snapshot) + exception = response.exception() + assert exception.errors[0] == error + + def test_list_tables(self): + # Setup Expected Response + next_page_token = "" + tables_element = {} + tables = [tables_element] + expected_response = {"next_page_token": next_page_token, "tables": tables} + expected_response = bigtable_table_admin_pb2.ListTablesResponse( + **expected_response + ) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + + paged_list_response = client.list_tables(parent) + resources = list(paged_list_response) + assert len(resources) == 1 + + assert expected_response.tables[0] == resources[0] + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.ListTablesRequest(parent=parent) + actual_request = 
channel.requests[0][1] + assert expected_request == actual_request + + def test_list_tables_exception(self): + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + + paged_list_response = client.list_tables(parent) + with pytest.raises(CustomException): + list(paged_list_response) + + def test_get_table(self): + # Setup Expected Response + name_2 = "name2-1052831874" + expected_response = {"name": name_2} + expected_response = table_pb2.Table(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + + response = client.get_table(name) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.GetTableRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_get_table_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + + with pytest.raises(CustomException): + client.get_table(name) + + def test_delete_table(self): + channel = ChannelStub() + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as 
create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + + client.delete_table(name) + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.DeleteTableRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_delete_table_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + + with pytest.raises(CustomException): + client.delete_table(name) + + def test_modify_column_families(self): + # Setup Expected Response + name_2 = "name2-1052831874" + expected_response = {"name": name_2} + expected_response = table_pb2.Table(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + modifications = [] + + response = client.modify_column_families(name, modifications) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.ModifyColumnFamiliesRequest( + name=name, modifications=modifications + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_modify_column_families_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = 
mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + modifications = [] + + with pytest.raises(CustomException): + client.modify_column_families(name, modifications) + + def test_drop_row_range(self): + channel = ChannelStub() + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + + client.drop_row_range(name) + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.DropRowRangeRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_drop_row_range_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + + with pytest.raises(CustomException): + client.drop_row_range(name) + + def test_generate_consistency_token(self): + # Setup Expected Response + consistency_token = "consistencyToken-1090516718" + expected_response = {"consistency_token": consistency_token} + expected_response = bigtable_table_admin_pb2.GenerateConsistencyTokenResponse( + **expected_response + ) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = 
bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + + response = client.generate_consistency_token(name) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.GenerateConsistencyTokenRequest( + name=name + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_generate_consistency_token_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + + with pytest.raises(CustomException): + client.generate_consistency_token(name) + + def test_check_consistency(self): + # Setup Expected Response + consistent = True + expected_response = {"consistent": consistent} + expected_response = bigtable_table_admin_pb2.CheckConsistencyResponse( + **expected_response + ) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + consistency_token = "consistencyToken-1090516718" + + response = client.check_consistency(name, consistency_token) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.CheckConsistencyRequest( + name=name, consistency_token=consistency_token + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_check_consistency_exception(self): + # 
Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + consistency_token = "consistencyToken-1090516718" + + with pytest.raises(CustomException): + client.check_consistency(name, consistency_token) + + def test_get_iam_policy(self): + # Setup Expected Response + version = 351608024 + etag = b"etag3123477" + expected_response = {"version": version, "etag": etag} + expected_response = policy_pb2.Policy(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + resource = "resource-341064690" + + response = client.get_iam_policy(resource) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = iam_policy_pb2.GetIamPolicyRequest(resource=resource) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_get_iam_policy_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + resource = "resource-341064690" + + with pytest.raises(CustomException): + client.get_iam_policy(resource) + + def test_set_iam_policy(self): + # Setup Expected Response + version = 351608024 + etag = b"etag3123477" + expected_response = {"version": version, "etag": etag} + expected_response = 
policy_pb2.Policy(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + resource = "resource-341064690" + policy = {} + + response = client.set_iam_policy(resource, policy) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = iam_policy_pb2.SetIamPolicyRequest( + resource=resource, policy=policy + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_set_iam_policy_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + resource = "resource-341064690" + policy = {} + + with pytest.raises(CustomException): + client.set_iam_policy(resource, policy) + + def test_test_iam_permissions(self): + # Setup Expected Response + expected_response = {} + expected_response = iam_policy_pb2.TestIamPermissionsResponse( + **expected_response + ) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + resource = "resource-341064690" + permissions = [] + + response = client.test_iam_permissions(resource, permissions) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = iam_policy_pb2.TestIamPermissionsRequest( + resource=resource, permissions=permissions + ) + actual_request = 
channel.requests[0][1] + assert expected_request == actual_request + + def test_test_iam_permissions_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + resource = "resource-341064690" + permissions = [] + + with pytest.raises(CustomException): + client.test_iam_permissions(resource, permissions) + + def test_snapshot_table(self): + # Setup Expected Response + name_2 = "name2-1052831874" + data_size_bytes = 2110122398 + description = "description-1724546052" + expected_response = { + "name": name_2, + "data_size_bytes": data_size_bytes, + "description": description, + } + expected_response = table_pb2.Snapshot(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_snapshot_table", done=True + ) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + cluster = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") + snapshot_id = "snapshotId-168585866" + + response = client.snapshot_table(name, cluster, snapshot_id) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.SnapshotTableRequest( + name=name, cluster=cluster, snapshot_id=snapshot_id + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_snapshot_table_exception(self): + # Setup Response + error = status_pb2.Status() + operation = 
operations_pb2.Operation( + name="operations/test_snapshot_table_exception", done=True + ) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + cluster = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") + snapshot_id = "snapshotId-168585866" + + response = client.snapshot_table(name, cluster, snapshot_id) + exception = response.exception() + assert exception.errors[0] == error + + def test_get_snapshot(self): + # Setup Expected Response + name_2 = "name2-1052831874" + data_size_bytes = 2110122398 + description = "description-1724546052" + expected_response = { + "name": name_2, + "data_size_bytes": data_size_bytes, + "description": description, + } + expected_response = table_pb2.Snapshot(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + name = client.snapshot_path( + "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" + ) + + response = client.get_snapshot(name) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.GetSnapshotRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_get_snapshot_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = 
channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + name = client.snapshot_path( + "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" + ) + + with pytest.raises(CustomException): + client.get_snapshot(name) + + def test_list_snapshots(self): + # Setup Expected Response + next_page_token = "" + snapshots_element = {} + snapshots = [snapshots_element] + expected_response = {"next_page_token": next_page_token, "snapshots": snapshots} + expected_response = bigtable_table_admin_pb2.ListSnapshotsResponse( + **expected_response + ) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") + + paged_list_response = client.list_snapshots(parent) + resources = list(paged_list_response) + assert len(resources) == 1 + + assert expected_response.snapshots[0] == resources[0] + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.ListSnapshotsRequest(parent=parent) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_list_snapshots_exception(self): + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") + + paged_list_response = client.list_snapshots(parent) + with pytest.raises(CustomException): + list(paged_list_response) + + def test_delete_snapshot(self): + channel = ChannelStub() + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + 
create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + name = client.snapshot_path( + "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" + ) + + client.delete_snapshot(name) + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.DeleteSnapshotRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_delete_snapshot_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + name = client.snapshot_path( + "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" + ) + + with pytest.raises(CustomException): + client.delete_snapshot(name) + + def test_create_backup(self): + # Setup Expected Response + name = "name3373707" + source_table = "sourceTable1670858410" + size_bytes = 1796325715 + expected_response = { + "name": name, + "source_table": source_table, + "size_bytes": size_bytes, + } + expected_response = table_pb2.Backup(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_create_backup", done=True + ) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") + backup_id = "backupId1355353272" + backup = {} + + response = client.create_backup(parent, backup_id, backup) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = 
bigtable_table_admin_pb2.CreateBackupRequest( + parent=parent, backup_id=backup_id, backup=backup + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_create_backup_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_create_backup_exception", done=True + ) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") + backup_id = "backupId1355353272" + backup = {} + + response = client.create_backup(parent, backup_id, backup) + exception = response.exception() + assert exception.errors[0] == error + + def test_get_backup(self): + # Setup Expected Response + name_2 = "name2-1052831874" + source_table = "sourceTable1670858410" + size_bytes = 1796325715 + expected_response = { + "name": name_2, + "source_table": source_table, + "size_bytes": size_bytes, + } + expected_response = table_pb2.Backup(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + name = client.backup_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[BACKUP]") + + response = client.get_backup(name) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.GetBackupRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_get_backup_exception(self): + # Mock the 
API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + name = client.backup_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[BACKUP]") + + with pytest.raises(CustomException): + client.get_backup(name) + + def test_list_backups(self): + # Setup Expected Response + next_page_token = "" + backups_element = {} + backups = [backups_element] + expected_response = {"next_page_token": next_page_token, "backups": backups} + expected_response = bigtable_table_admin_pb2.ListBackupsResponse( + **expected_response + ) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") + + paged_list_response = client.list_backups(parent) + resources = list(paged_list_response) + assert len(resources) == 1 + + assert expected_response.backups[0] == resources[0] + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.ListBackupsRequest(parent=parent) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_list_backups_exception(self): + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") + + paged_list_response = client.list_backups(parent) + with pytest.raises(CustomException): + list(paged_list_response) 
+ + def test_update_backup(self): + # Setup Expected Response + name = "name3373707" + source_table = "sourceTable1670858410" + size_bytes = 1796325715 + expected_response = { + "name": name, + "source_table": source_table, + "size_bytes": size_bytes, + } + expected_response = table_pb2.Backup(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + backup = {} + update_mask = {} + + response = client.update_backup(backup, update_mask) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.UpdateBackupRequest( + backup=backup, update_mask=update_mask + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_update_backup_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + backup = {} + update_mask = {} + + with pytest.raises(CustomException): + client.update_backup(backup, update_mask) + + def test_delete_backup(self): + channel = ChannelStub() + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + name = client.backup_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[BACKUP]") + + client.delete_backup(name) + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.DeleteBackupRequest(name=name) + actual_request = channel.requests[0][1] + assert 
expected_request == actual_request + + def test_delete_backup_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + name = client.backup_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[BACKUP]") + + with pytest.raises(CustomException): + client.delete_backup(name) + + def test_restore_table(self): + # Setup Expected Response + name = "name3373707" + expected_response = {"name": name} + expected_response = table_pb2.Table(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_restore_table", done=True + ) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + response = client.restore_table() + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.RestoreTableRequest() + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_restore_table_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_restore_table_exception", done=True + ) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + response = client.restore_table() + exception = response.exception() + assert 
exception.errors[0] == error From 882fa756c4415bdc2ea686f198a2829251524a55 Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Sat, 13 Feb 2021 06:02:56 -0800 Subject: [PATCH 02/11] Bump gapic-generator to 2.6.1. - Fix a scenario where generator attempts to assign a string to an integer in tests by using a separate value generator in test generation PiperOrigin-RevId: 336931287 Source-Author: Google APIs Source-Date: Tue Oct 13 12:29:21 2020 -0700 Source-Repo: googleapis/googleapis Source-Sha: 3a935fab757e09c72afd4aa121147a4c97dccc3e Source-Link: https://github.com/googleapis/googleapis/commit/3a935fab757e09c72afd4aa121147a4c97dccc3e --- synth.metadata | 4 ++-- tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py | 4 ++-- tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/synth.metadata b/synth.metadata index 59b837240..6ad9d5acf 100644 --- a/synth.metadata +++ b/synth.metadata @@ -11,8 +11,8 @@ "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "8d73f9486fc193a150f6c907dfb9f49431aff3ff", - "internalRef": "332497859" + "sha": "3a935fab757e09c72afd4aa121147a4c97dccc3e", + "internalRef": "336931287" } }, { diff --git a/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py b/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py index df083406b..035b5aa00 100644 --- a/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py +++ b/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py @@ -801,7 +801,7 @@ def test_delete_app_profile_exception(self): def test_get_iam_policy(self): # Setup Expected Response version = 351608024 - etag = b"etag3123477" + etag = b"21" expected_response = {"version": version, "etag": etag} expected_response = policy_pb2.Policy(**expected_response) @@ -840,7 +840,7 @@ def test_get_iam_policy_exception(self): def test_set_iam_policy(self): # Setup Expected Response version 
= 351608024 - etag = b"etag3123477" + etag = b"21" expected_response = {"version": version, "etag": etag} expected_response = policy_pb2.Policy(**expected_response) diff --git a/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py b/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py index 42db08579..72719a0de 100644 --- a/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py +++ b/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py @@ -449,7 +449,7 @@ def test_check_consistency_exception(self): def test_get_iam_policy(self): # Setup Expected Response version = 351608024 - etag = b"etag3123477" + etag = b"21" expected_response = {"version": version, "etag": etag} expected_response = policy_pb2.Policy(**expected_response) @@ -488,7 +488,7 @@ def test_get_iam_policy_exception(self): def test_set_iam_policy(self): # Setup Expected Response version = 351608024 - etag = b"etag3123477" + etag = b"21" expected_response = {"version": version, "etag": etag} expected_response = policy_pb2.Policy(**expected_response) From 611e6b9f34212359848a724a1ef96b4acf784236 Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Sat, 13 Feb 2021 06:03:13 -0800 Subject: [PATCH 03/11] chore: update proto definitions for bigquery/v2 to support BQML statistics PiperOrigin-RevId: 337113354 Source-Author: Google APIs Source-Date: Wed Oct 14 10:04:20 2020 -0700 Source-Repo: googleapis/googleapis Source-Sha: 215c12ade72d9d9616457d9b8b2f8a37f38e79f3 Source-Link: https://github.com/googleapis/googleapis/commit/215c12ade72d9d9616457d9b8b2f8a37f38e79f3 --- synth.metadata | 9 +- .../unit/gapic/v2/test_bigtable_client_v2.py | 316 ----- .../test_bigtable_instance_admin_client_v2.py | 924 --------------- .../v2/test_bigtable_table_admin_client_v2.py | 1039 ----------------- 4 files changed, 3 insertions(+), 2285 deletions(-) delete mode 100644 tests/unit/gapic/v2/test_bigtable_client_v2.py delete mode 100644 tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py delete 
mode 100644 tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py diff --git a/synth.metadata b/synth.metadata index 6ad9d5acf..40797dba9 100644 --- a/synth.metadata +++ b/synth.metadata @@ -11,8 +11,8 @@ "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "3a935fab757e09c72afd4aa121147a4c97dccc3e", - "internalRef": "336931287" + "sha": "215c12ade72d9d9616457d9b8b2f8a37f38e79f3", + "internalRef": "337113354" } }, { @@ -167,9 +167,6 @@ "scripts/readme-gen/templates/install_deps.tmpl.rst", "scripts/readme-gen/templates/install_portaudio.tmpl.rst", "setup.cfg", - "testing/.gitignore", - "tests/unit/gapic/v2/test_bigtable_client_v2.py", - "tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py", - "tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py" + "testing/.gitignore" ] } \ No newline at end of file diff --git a/tests/unit/gapic/v2/test_bigtable_client_v2.py b/tests/unit/gapic/v2/test_bigtable_client_v2.py deleted file mode 100644 index 84abfecef..000000000 --- a/tests/unit/gapic/v2/test_bigtable_client_v2.py +++ /dev/null @@ -1,316 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Unit tests.""" - -import mock -import pytest - -from google.cloud import bigtable_v2 -from google.cloud.bigtable_v2.proto import bigtable_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - def unary_stream(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestBigtableClient(object): - def test_read_rows(self): - # Setup Expected Response - last_scanned_row_key = b"-126" - expected_response = {"last_scanned_row_key": last_scanned_row_key} - expected_response = bigtable_pb2.ReadRowsResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[iter([expected_response])]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup Request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - response = client.read_rows(table_name) - resources = list(response) - assert len(resources) == 1 - assert expected_response == resources[0] - - assert len(channel.requests) == 1 - expected_request = 
bigtable_pb2.ReadRowsRequest(table_name=table_name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_read_rows_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - with pytest.raises(CustomException): - client.read_rows(table_name) - - def test_sample_row_keys(self): - # Setup Expected Response - row_key = b"122" - offset_bytes = 889884095 - expected_response = {"row_key": row_key, "offset_bytes": offset_bytes} - expected_response = bigtable_pb2.SampleRowKeysResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[iter([expected_response])]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup Request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - response = client.sample_row_keys(table_name) - resources = list(response) - assert len(resources) == 1 - assert expected_response == resources[0] - - assert len(channel.requests) == 1 - expected_request = bigtable_pb2.SampleRowKeysRequest(table_name=table_name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_sample_row_keys_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - with 
pytest.raises(CustomException): - client.sample_row_keys(table_name) - - def test_mutate_row(self): - # Setup Expected Response - expected_response = {} - expected_response = bigtable_pb2.MutateRowResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup Request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - row_key = b"122" - mutations = [] - - response = client.mutate_row(table_name, row_key, mutations) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_pb2.MutateRowRequest( - table_name=table_name, row_key=row_key, mutations=mutations - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_mutate_row_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - row_key = b"122" - mutations = [] - - with pytest.raises(CustomException): - client.mutate_row(table_name, row_key, mutations) - - def test_mutate_rows(self): - # Setup Expected Response - expected_response = {} - expected_response = bigtable_pb2.MutateRowsResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[iter([expected_response])]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup Request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", 
"[TABLE]") - entries = [] - - response = client.mutate_rows(table_name, entries) - resources = list(response) - assert len(resources) == 1 - assert expected_response == resources[0] - - assert len(channel.requests) == 1 - expected_request = bigtable_pb2.MutateRowsRequest( - table_name=table_name, entries=entries - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_mutate_rows_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - entries = [] - - with pytest.raises(CustomException): - client.mutate_rows(table_name, entries) - - def test_check_and_mutate_row(self): - # Setup Expected Response - predicate_matched = True - expected_response = {"predicate_matched": predicate_matched} - expected_response = bigtable_pb2.CheckAndMutateRowResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup Request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - row_key = b"122" - - response = client.check_and_mutate_row(table_name, row_key) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_pb2.CheckAndMutateRowRequest( - table_name=table_name, row_key=row_key - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_check_and_mutate_row_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = 
mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - row_key = b"122" - - with pytest.raises(CustomException): - client.check_and_mutate_row(table_name, row_key) - - def test_read_modify_write_row(self): - # Setup Expected Response - expected_response = {} - expected_response = bigtable_pb2.ReadModifyWriteRowResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup Request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - row_key = b"122" - rules = [] - - response = client.read_modify_write_row(table_name, row_key, rules) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_pb2.ReadModifyWriteRowRequest( - table_name=table_name, row_key=row_key, rules=rules - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_read_modify_write_row_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - row_key = b"122" - rules = [] - - with pytest.raises(CustomException): - client.read_modify_write_row(table_name, row_key, rules) diff --git a/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py b/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py deleted file mode 100644 index 
035b5aa00..000000000 --- a/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py +++ /dev/null @@ -1,924 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Unit tests.""" - -import mock -import pytest - -from google.rpc import status_pb2 - -from google.cloud import bigtable_admin_v2 -from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2 -from google.cloud.bigtable_admin_v2.proto import instance_pb2 -from google.iam.v1 import iam_policy_pb2 -from google.iam.v1 import policy_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, 
response_deserializer=None): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestBigtableInstanceAdminClient(object): - def test_create_instance(self): - # Setup Expected Response - name = "name3373707" - display_name = "displayName1615086568" - expected_response = {"name": name, "display_name": display_name} - expected_response = instance_pb2.Instance(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_create_instance", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.project_path("[PROJECT]") - instance_id = "instanceId-2101995259" - instance = {} - clusters = {} - - response = client.create_instance(parent, instance_id, instance, clusters) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.CreateInstanceRequest( - parent=parent, instance_id=instance_id, instance=instance, clusters=clusters - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_instance_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_create_instance_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.project_path("[PROJECT]") - instance_id = 
"instanceId-2101995259" - instance = {} - clusters = {} - - response = client.create_instance(parent, instance_id, instance, clusters) - exception = response.exception() - assert exception.errors[0] == error - - def test_get_instance(self): - # Setup Expected Response - name_2 = "name2-1052831874" - display_name = "displayName1615086568" - expected_response = {"name": name_2, "display_name": display_name} - expected_response = instance_pb2.Instance(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - name = client.instance_path("[PROJECT]", "[INSTANCE]") - - response = client.get_instance(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.GetInstanceRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_instance_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - name = client.instance_path("[PROJECT]", "[INSTANCE]") - - with pytest.raises(CustomException): - client.get_instance(name) - - def test_list_instances(self): - # Setup Expected Response - next_page_token = "nextPageToken-1530815211" - expected_response = {"next_page_token": next_page_token} - expected_response = bigtable_instance_admin_pb2.ListInstancesResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = 
mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.project_path("[PROJECT]") - - response = client.list_instances(parent) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.ListInstancesRequest( - parent=parent - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_instances_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - parent = client.project_path("[PROJECT]") - - with pytest.raises(CustomException): - client.list_instances(parent) - - def test_update_instance(self): - # Setup Expected Response - name = "name3373707" - display_name_2 = "displayName21615000987" - expected_response = {"name": name, "display_name": display_name_2} - expected_response = instance_pb2.Instance(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - display_name = "displayName1615086568" - - response = client.update_instance(display_name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = instance_pb2.Instance(display_name=display_name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_instance_exception(self): - # Mock the API response - channel = 
ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - display_name = "displayName1615086568" - - with pytest.raises(CustomException): - client.update_instance(display_name) - - def test_partial_update_instance(self): - # Setup Expected Response - name = "name3373707" - display_name = "displayName1615086568" - expected_response = {"name": name, "display_name": display_name} - expected_response = instance_pb2.Instance(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_partial_update_instance", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - instance = {} - update_mask = {} - - response = client.partial_update_instance(instance, update_mask) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.PartialUpdateInstanceRequest( - instance=instance, update_mask=update_mask - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_partial_update_instance_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_partial_update_instance_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = 
bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - instance = {} - update_mask = {} - - response = client.partial_update_instance(instance, update_mask) - exception = response.exception() - assert exception.errors[0] == error - - def test_delete_instance(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - name = client.instance_path("[PROJECT]", "[INSTANCE]") - - client.delete_instance(name) - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.DeleteInstanceRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_instance_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - name = client.instance_path("[PROJECT]", "[INSTANCE]") - - with pytest.raises(CustomException): - client.delete_instance(name) - - def test_create_cluster(self): - # Setup Expected Response - name = "name3373707" - location = "location1901043637" - serve_nodes = 1288838783 - expected_response = { - "name": name, - "location": location, - "serve_nodes": serve_nodes, - } - expected_response = instance_pb2.Cluster(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_create_cluster", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = 
bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - cluster_id = "clusterId240280960" - cluster = {} - - response = client.create_cluster(parent, cluster_id, cluster) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.CreateClusterRequest( - parent=parent, cluster_id=cluster_id, cluster=cluster - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_cluster_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_create_cluster_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - cluster_id = "clusterId240280960" - cluster = {} - - response = client.create_cluster(parent, cluster_id, cluster) - exception = response.exception() - assert exception.errors[0] == error - - def test_get_cluster(self): - # Setup Expected Response - name_2 = "name2-1052831874" - location = "location1901043637" - serve_nodes = 1288838783 - expected_response = { - "name": name_2, - "location": location, - "serve_nodes": serve_nodes, - } - expected_response = instance_pb2.Cluster(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - name = client.cluster_path("[PROJECT]", 
"[INSTANCE]", "[CLUSTER]") - - response = client.get_cluster(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.GetClusterRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_cluster_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - - with pytest.raises(CustomException): - client.get_cluster(name) - - def test_list_clusters(self): - # Setup Expected Response - next_page_token = "nextPageToken-1530815211" - expected_response = {"next_page_token": next_page_token} - expected_response = bigtable_instance_admin_pb2.ListClustersResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - - response = client.list_clusters(parent) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.ListClustersRequest( - parent=parent - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_clusters_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = 
bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - - with pytest.raises(CustomException): - client.list_clusters(parent) - - def test_update_cluster(self): - # Setup Expected Response - name = "name3373707" - location = "location1901043637" - serve_nodes_2 = 1623486220 - expected_response = { - "name": name, - "location": location, - "serve_nodes": serve_nodes_2, - } - expected_response = instance_pb2.Cluster(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_update_cluster", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - serve_nodes = 1288838783 - - response = client.update_cluster(serve_nodes) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = instance_pb2.Cluster(serve_nodes=serve_nodes) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_cluster_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_update_cluster_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - serve_nodes = 1288838783 - - response = client.update_cluster(serve_nodes) - exception = response.exception() - assert exception.errors[0] == error - - def test_delete_cluster(self): - 
channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - - client.delete_cluster(name) - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.DeleteClusterRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_cluster_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - - with pytest.raises(CustomException): - client.delete_cluster(name) - - def test_create_app_profile(self): - # Setup Expected Response - name = "name3373707" - etag = "etag3123477" - description = "description-1724546052" - expected_response = {"name": name, "etag": etag, "description": description} - expected_response = instance_pb2.AppProfile(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - app_profile_id = "appProfileId1262094415" - app_profile = {} - - response = client.create_app_profile(parent, app_profile_id, app_profile) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.CreateAppProfileRequest( - 
parent=parent, app_profile_id=app_profile_id, app_profile=app_profile - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_app_profile_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - app_profile_id = "appProfileId1262094415" - app_profile = {} - - with pytest.raises(CustomException): - client.create_app_profile(parent, app_profile_id, app_profile) - - def test_get_app_profile(self): - # Setup Expected Response - name_2 = "name2-1052831874" - etag = "etag3123477" - description = "description-1724546052" - expected_response = {"name": name_2, "etag": etag, "description": description} - expected_response = instance_pb2.AppProfile(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - name = client.app_profile_path("[PROJECT]", "[INSTANCE]", "[APP_PROFILE]") - - response = client.get_app_profile(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.GetAppProfileRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_app_profile_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = 
bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - name = client.app_profile_path("[PROJECT]", "[INSTANCE]", "[APP_PROFILE]") - - with pytest.raises(CustomException): - client.get_app_profile(name) - - def test_list_app_profiles(self): - # Setup Expected Response - next_page_token = "" - app_profiles_element = {} - app_profiles = [app_profiles_element] - expected_response = { - "next_page_token": next_page_token, - "app_profiles": app_profiles, - } - expected_response = bigtable_instance_admin_pb2.ListAppProfilesResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - - paged_list_response = client.list_app_profiles(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.app_profiles[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.ListAppProfilesRequest( - parent=parent - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_app_profiles_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - - paged_list_response = client.list_app_profiles(parent) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_update_app_profile(self): - # Setup Expected Response - name = "name3373707" - etag = "etag3123477" - description = 
"description-1724546052" - expected_response = {"name": name, "etag": etag, "description": description} - expected_response = instance_pb2.AppProfile(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_update_app_profile", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - app_profile = {} - update_mask = {} - - response = client.update_app_profile(app_profile, update_mask) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.UpdateAppProfileRequest( - app_profile=app_profile, update_mask=update_mask - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_app_profile_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_update_app_profile_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - app_profile = {} - update_mask = {} - - response = client.update_app_profile(app_profile, update_mask) - exception = response.exception() - assert exception.errors[0] == error - - def test_delete_app_profile(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = 
bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - name = client.app_profile_path("[PROJECT]", "[INSTANCE]", "[APP_PROFILE]") - - client.delete_app_profile(name) - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.DeleteAppProfileRequest( - name=name - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_app_profile_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - name = client.app_profile_path("[PROJECT]", "[INSTANCE]", "[APP_PROFILE]") - - with pytest.raises(CustomException): - client.delete_app_profile(name) - - def test_get_iam_policy(self): - # Setup Expected Response - version = 351608024 - etag = b"21" - expected_response = {"version": version, "etag": etag} - expected_response = policy_pb2.Policy(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - resource = "resource-341064690" - - response = client.get_iam_policy(resource) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = iam_policy_pb2.GetIamPolicyRequest(resource=resource) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_iam_policy_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = 
channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - resource = "resource-341064690" - - with pytest.raises(CustomException): - client.get_iam_policy(resource) - - def test_set_iam_policy(self): - # Setup Expected Response - version = 351608024 - etag = b"21" - expected_response = {"version": version, "etag": etag} - expected_response = policy_pb2.Policy(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - resource = "resource-341064690" - policy = {} - - response = client.set_iam_policy(resource, policy) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = iam_policy_pb2.SetIamPolicyRequest( - resource=resource, policy=policy - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_set_iam_policy_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - resource = "resource-341064690" - policy = {} - - with pytest.raises(CustomException): - client.set_iam_policy(resource, policy) - - def test_test_iam_permissions(self): - # Setup Expected Response - expected_response = {} - expected_response = iam_policy_pb2.TestIamPermissionsResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = 
bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - resource = "resource-341064690" - permissions = [] - - response = client.test_iam_permissions(resource, permissions) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = iam_policy_pb2.TestIamPermissionsRequest( - resource=resource, permissions=permissions - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_test_iam_permissions_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - resource = "resource-341064690" - permissions = [] - - with pytest.raises(CustomException): - client.test_iam_permissions(resource, permissions) diff --git a/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py b/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py deleted file mode 100644 index 72719a0de..000000000 --- a/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py +++ /dev/null @@ -1,1039 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Unit tests.""" - -import mock -import pytest - -from google.rpc import status_pb2 - -from google.cloud import bigtable_admin_v2 -from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2 -from google.cloud.bigtable_admin_v2.proto import table_pb2 -from google.iam.v1 import iam_policy_pb2 -from google.iam.v1 import policy_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestBigtableTableAdminClient(object): - def test_create_table(self): - # Setup Expected Response - name = "name3373707" - expected_response = {"name": name} - expected_response = table_pb2.Table(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - table_id = "tableId-895419604" - table = {} - - response 
= client.create_table(parent, table_id, table) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.CreateTableRequest( - parent=parent, table_id=table_id, table=table - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_table_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - table_id = "tableId-895419604" - table = {} - - with pytest.raises(CustomException): - client.create_table(parent, table_id, table) - - def test_create_table_from_snapshot(self): - # Setup Expected Response - name = "name3373707" - expected_response = {"name": name} - expected_response = table_pb2.Table(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_create_table_from_snapshot", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - table_id = "tableId-895419604" - source_snapshot = client.snapshot_path( - "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" - ) - - response = client.create_table_from_snapshot(parent, table_id, source_snapshot) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.CreateTableFromSnapshotRequest( - parent=parent, table_id=table_id, 
source_snapshot=source_snapshot - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_table_from_snapshot_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_create_table_from_snapshot_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - table_id = "tableId-895419604" - source_snapshot = client.snapshot_path( - "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" - ) - - response = client.create_table_from_snapshot(parent, table_id, source_snapshot) - exception = response.exception() - assert exception.errors[0] == error - - def test_list_tables(self): - # Setup Expected Response - next_page_token = "" - tables_element = {} - tables = [tables_element] - expected_response = {"next_page_token": next_page_token, "tables": tables} - expected_response = bigtable_table_admin_pb2.ListTablesResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - - paged_list_response = client.list_tables(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.tables[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.ListTablesRequest(parent=parent) - actual_request = 
channel.requests[0][1] - assert expected_request == actual_request - - def test_list_tables_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - - paged_list_response = client.list_tables(parent) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_get_table(self): - # Setup Expected Response - name_2 = "name2-1052831874" - expected_response = {"name": name_2} - expected_response = table_pb2.Table(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - response = client.get_table(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.GetTableRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_table_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - with pytest.raises(CustomException): - client.get_table(name) - - def test_delete_table(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as 
create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - client.delete_table(name) - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.DeleteTableRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_table_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - with pytest.raises(CustomException): - client.delete_table(name) - - def test_modify_column_families(self): - # Setup Expected Response - name_2 = "name2-1052831874" - expected_response = {"name": name_2} - expected_response = table_pb2.Table(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - modifications = [] - - response = client.modify_column_families(name, modifications) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.ModifyColumnFamiliesRequest( - name=name, modifications=modifications - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_modify_column_families_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = 
mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - modifications = [] - - with pytest.raises(CustomException): - client.modify_column_families(name, modifications) - - def test_drop_row_range(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - client.drop_row_range(name) - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.DropRowRangeRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_drop_row_range_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - with pytest.raises(CustomException): - client.drop_row_range(name) - - def test_generate_consistency_token(self): - # Setup Expected Response - consistency_token = "consistencyToken-1090516718" - expected_response = {"consistency_token": consistency_token} - expected_response = bigtable_table_admin_pb2.GenerateConsistencyTokenResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = 
bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - response = client.generate_consistency_token(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.GenerateConsistencyTokenRequest( - name=name - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_generate_consistency_token_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - with pytest.raises(CustomException): - client.generate_consistency_token(name) - - def test_check_consistency(self): - # Setup Expected Response - consistent = True - expected_response = {"consistent": consistent} - expected_response = bigtable_table_admin_pb2.CheckConsistencyResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - consistency_token = "consistencyToken-1090516718" - - response = client.check_consistency(name, consistency_token) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.CheckConsistencyRequest( - name=name, consistency_token=consistency_token - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_check_consistency_exception(self): - # 
Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - consistency_token = "consistencyToken-1090516718" - - with pytest.raises(CustomException): - client.check_consistency(name, consistency_token) - - def test_get_iam_policy(self): - # Setup Expected Response - version = 351608024 - etag = b"21" - expected_response = {"version": version, "etag": etag} - expected_response = policy_pb2.Policy(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - resource = "resource-341064690" - - response = client.get_iam_policy(resource) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = iam_policy_pb2.GetIamPolicyRequest(resource=resource) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_iam_policy_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - resource = "resource-341064690" - - with pytest.raises(CustomException): - client.get_iam_policy(resource) - - def test_set_iam_policy(self): - # Setup Expected Response - version = 351608024 - etag = b"21" - expected_response = {"version": version, "etag": etag} - expected_response = 
policy_pb2.Policy(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - resource = "resource-341064690" - policy = {} - - response = client.set_iam_policy(resource, policy) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = iam_policy_pb2.SetIamPolicyRequest( - resource=resource, policy=policy - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_set_iam_policy_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - resource = "resource-341064690" - policy = {} - - with pytest.raises(CustomException): - client.set_iam_policy(resource, policy) - - def test_test_iam_permissions(self): - # Setup Expected Response - expected_response = {} - expected_response = iam_policy_pb2.TestIamPermissionsResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - resource = "resource-341064690" - permissions = [] - - response = client.test_iam_permissions(resource, permissions) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = iam_policy_pb2.TestIamPermissionsRequest( - resource=resource, permissions=permissions - ) - actual_request = 
channel.requests[0][1] - assert expected_request == actual_request - - def test_test_iam_permissions_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - resource = "resource-341064690" - permissions = [] - - with pytest.raises(CustomException): - client.test_iam_permissions(resource, permissions) - - def test_snapshot_table(self): - # Setup Expected Response - name_2 = "name2-1052831874" - data_size_bytes = 2110122398 - description = "description-1724546052" - expected_response = { - "name": name_2, - "data_size_bytes": data_size_bytes, - "description": description, - } - expected_response = table_pb2.Snapshot(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_snapshot_table", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - cluster = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - snapshot_id = "snapshotId-168585866" - - response = client.snapshot_table(name, cluster, snapshot_id) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.SnapshotTableRequest( - name=name, cluster=cluster, snapshot_id=snapshot_id - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_snapshot_table_exception(self): - # Setup Response - error = status_pb2.Status() - operation = 
operations_pb2.Operation( - name="operations/test_snapshot_table_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - cluster = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - snapshot_id = "snapshotId-168585866" - - response = client.snapshot_table(name, cluster, snapshot_id) - exception = response.exception() - assert exception.errors[0] == error - - def test_get_snapshot(self): - # Setup Expected Response - name_2 = "name2-1052831874" - data_size_bytes = 2110122398 - description = "description-1724546052" - expected_response = { - "name": name_2, - "data_size_bytes": data_size_bytes, - "description": description, - } - expected_response = table_pb2.Snapshot(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.snapshot_path( - "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" - ) - - response = client.get_snapshot(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.GetSnapshotRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_snapshot_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = 
channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.snapshot_path( - "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" - ) - - with pytest.raises(CustomException): - client.get_snapshot(name) - - def test_list_snapshots(self): - # Setup Expected Response - next_page_token = "" - snapshots_element = {} - snapshots = [snapshots_element] - expected_response = {"next_page_token": next_page_token, "snapshots": snapshots} - expected_response = bigtable_table_admin_pb2.ListSnapshotsResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - - paged_list_response = client.list_snapshots(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.snapshots[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.ListSnapshotsRequest(parent=parent) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_snapshots_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - - paged_list_response = client.list_snapshots(parent) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_delete_snapshot(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - 
create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.snapshot_path( - "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" - ) - - client.delete_snapshot(name) - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.DeleteSnapshotRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_snapshot_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.snapshot_path( - "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" - ) - - with pytest.raises(CustomException): - client.delete_snapshot(name) - - def test_create_backup(self): - # Setup Expected Response - name = "name3373707" - source_table = "sourceTable1670858410" - size_bytes = 1796325715 - expected_response = { - "name": name, - "source_table": source_table, - "size_bytes": size_bytes, - } - expected_response = table_pb2.Backup(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_create_backup", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - backup_id = "backupId1355353272" - backup = {} - - response = client.create_backup(parent, backup_id, backup) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = 
bigtable_table_admin_pb2.CreateBackupRequest( - parent=parent, backup_id=backup_id, backup=backup - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_backup_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_create_backup_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - backup_id = "backupId1355353272" - backup = {} - - response = client.create_backup(parent, backup_id, backup) - exception = response.exception() - assert exception.errors[0] == error - - def test_get_backup(self): - # Setup Expected Response - name_2 = "name2-1052831874" - source_table = "sourceTable1670858410" - size_bytes = 1796325715 - expected_response = { - "name": name_2, - "source_table": source_table, - "size_bytes": size_bytes, - } - expected_response = table_pb2.Backup(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.backup_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[BACKUP]") - - response = client.get_backup(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.GetBackupRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_backup_exception(self): - # Mock the 
API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.backup_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[BACKUP]") - - with pytest.raises(CustomException): - client.get_backup(name) - - def test_list_backups(self): - # Setup Expected Response - next_page_token = "" - backups_element = {} - backups = [backups_element] - expected_response = {"next_page_token": next_page_token, "backups": backups} - expected_response = bigtable_table_admin_pb2.ListBackupsResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - - paged_list_response = client.list_backups(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.backups[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.ListBackupsRequest(parent=parent) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_backups_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - - paged_list_response = client.list_backups(parent) - with pytest.raises(CustomException): - list(paged_list_response) 
- - def test_update_backup(self): - # Setup Expected Response - name = "name3373707" - source_table = "sourceTable1670858410" - size_bytes = 1796325715 - expected_response = { - "name": name, - "source_table": source_table, - "size_bytes": size_bytes, - } - expected_response = table_pb2.Backup(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - backup = {} - update_mask = {} - - response = client.update_backup(backup, update_mask) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.UpdateBackupRequest( - backup=backup, update_mask=update_mask - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_backup_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - backup = {} - update_mask = {} - - with pytest.raises(CustomException): - client.update_backup(backup, update_mask) - - def test_delete_backup(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.backup_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[BACKUP]") - - client.delete_backup(name) - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.DeleteBackupRequest(name=name) - actual_request = channel.requests[0][1] - assert 
expected_request == actual_request - - def test_delete_backup_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.backup_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[BACKUP]") - - with pytest.raises(CustomException): - client.delete_backup(name) - - def test_restore_table(self): - # Setup Expected Response - name = "name3373707" - expected_response = {"name": name} - expected_response = table_pb2.Table(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_restore_table", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - response = client.restore_table() - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.RestoreTableRequest() - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_restore_table_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_restore_table_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - response = client.restore_table() - exception = response.exception() - assert 
exception.errors[0] == error From 3503df9f0549549f2b24e831890c398161b3b516 Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Sat, 13 Feb 2021 06:03:47 -0800 Subject: [PATCH 04/11] Add schema for providing GAPIC metadata This allows each (API,version,language) combination to provide a mapping between RPCs and library methods invoking those RPCs. Source-Author: Victor Chudnovsky Source-Date: Thu Oct 15 10:14:53 2020 -0700 Source-Repo: googleapis/googleapis Source-Sha: 121ec62ffda4a20e726631be34d76ad35373ca15 Source-Link: https://github.com/googleapis/googleapis/commit/121ec62ffda4a20e726631be34d76ad35373ca15 --- synth.metadata | 8 +- .../unit/gapic/v2/test_bigtable_client_v2.py | 316 +++++ .../test_bigtable_instance_admin_client_v2.py | 924 +++++++++++++++ .../v2/test_bigtable_table_admin_client_v2.py | 1039 +++++++++++++++++ 4 files changed, 2284 insertions(+), 3 deletions(-) create mode 100644 tests/unit/gapic/v2/test_bigtable_client_v2.py create mode 100644 tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py create mode 100644 tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py diff --git a/synth.metadata b/synth.metadata index 40797dba9..742512874 100644 --- a/synth.metadata +++ b/synth.metadata @@ -11,8 +11,7 @@ "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "215c12ade72d9d9616457d9b8b2f8a37f38e79f3", - "internalRef": "337113354" + "sha": "121ec62ffda4a20e726631be34d76ad35373ca15" } }, { @@ -167,6 +166,9 @@ "scripts/readme-gen/templates/install_deps.tmpl.rst", "scripts/readme-gen/templates/install_portaudio.tmpl.rst", "setup.cfg", - "testing/.gitignore" + "testing/.gitignore", + "tests/unit/gapic/v2/test_bigtable_client_v2.py", + "tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py", + "tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py" ] } \ No newline at end of file diff --git a/tests/unit/gapic/v2/test_bigtable_client_v2.py 
b/tests/unit/gapic/v2/test_bigtable_client_v2.py new file mode 100644 index 000000000..84abfecef --- /dev/null +++ b/tests/unit/gapic/v2/test_bigtable_client_v2.py @@ -0,0 +1,316 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Unit tests.""" + +import mock +import pytest + +from google.cloud import bigtable_v2 +from google.cloud.bigtable_v2.proto import bigtable_pb2 + + +class MultiCallableStub(object): + """Stub for the grpc.UnaryUnaryMultiCallable interface.""" + + def __init__(self, method, channel_stub): + self.method = method + self.channel_stub = channel_stub + + def __call__(self, request, timeout=None, metadata=None, credentials=None): + self.channel_stub.requests.append((self.method, request)) + + response = None + if self.channel_stub.responses: + response = self.channel_stub.responses.pop() + + if isinstance(response, Exception): + raise response + + if response: + return response + + +class ChannelStub(object): + """Stub for the grpc.Channel interface.""" + + def __init__(self, responses=[]): + self.responses = responses + self.requests = [] + + def unary_unary(self, method, request_serializer=None, response_deserializer=None): + return MultiCallableStub(method, self) + + def unary_stream(self, method, request_serializer=None, response_deserializer=None): + return MultiCallableStub(method, self) + + +class CustomException(Exception): + pass + + +class TestBigtableClient(object): + def 
test_read_rows(self): + # Setup Expected Response + last_scanned_row_key = b"-126" + expected_response = {"last_scanned_row_key": last_scanned_row_key} + expected_response = bigtable_pb2.ReadRowsResponse(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[iter([expected_response])]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_v2.BigtableClient() + + # Setup Request + table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + + response = client.read_rows(table_name) + resources = list(response) + assert len(resources) == 1 + assert expected_response == resources[0] + + assert len(channel.requests) == 1 + expected_request = bigtable_pb2.ReadRowsRequest(table_name=table_name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_read_rows_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_v2.BigtableClient() + + # Setup request + table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + + with pytest.raises(CustomException): + client.read_rows(table_name) + + def test_sample_row_keys(self): + # Setup Expected Response + row_key = b"122" + offset_bytes = 889884095 + expected_response = {"row_key": row_key, "offset_bytes": offset_bytes} + expected_response = bigtable_pb2.SampleRowKeysResponse(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[iter([expected_response])]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_v2.BigtableClient() + + # Setup Request + table_name = client.table_path("[PROJECT]", 
"[INSTANCE]", "[TABLE]") + + response = client.sample_row_keys(table_name) + resources = list(response) + assert len(resources) == 1 + assert expected_response == resources[0] + + assert len(channel.requests) == 1 + expected_request = bigtable_pb2.SampleRowKeysRequest(table_name=table_name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_sample_row_keys_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_v2.BigtableClient() + + # Setup request + table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + + with pytest.raises(CustomException): + client.sample_row_keys(table_name) + + def test_mutate_row(self): + # Setup Expected Response + expected_response = {} + expected_response = bigtable_pb2.MutateRowResponse(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_v2.BigtableClient() + + # Setup Request + table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + row_key = b"122" + mutations = [] + + response = client.mutate_row(table_name, row_key, mutations) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_pb2.MutateRowRequest( + table_name=table_name, row_key=row_key, mutations=mutations + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_mutate_row_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value 
= channel + client = bigtable_v2.BigtableClient() + + # Setup request + table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + row_key = b"122" + mutations = [] + + with pytest.raises(CustomException): + client.mutate_row(table_name, row_key, mutations) + + def test_mutate_rows(self): + # Setup Expected Response + expected_response = {} + expected_response = bigtable_pb2.MutateRowsResponse(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[iter([expected_response])]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_v2.BigtableClient() + + # Setup Request + table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + entries = [] + + response = client.mutate_rows(table_name, entries) + resources = list(response) + assert len(resources) == 1 + assert expected_response == resources[0] + + assert len(channel.requests) == 1 + expected_request = bigtable_pb2.MutateRowsRequest( + table_name=table_name, entries=entries + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_mutate_rows_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_v2.BigtableClient() + + # Setup request + table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + entries = [] + + with pytest.raises(CustomException): + client.mutate_rows(table_name, entries) + + def test_check_and_mutate_row(self): + # Setup Expected Response + predicate_matched = True + expected_response = {"predicate_matched": predicate_matched} + expected_response = bigtable_pb2.CheckAndMutateRowResponse(**expected_response) + + # Mock the API response + channel = 
ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_v2.BigtableClient() + + # Setup Request + table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + row_key = b"122" + + response = client.check_and_mutate_row(table_name, row_key) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_pb2.CheckAndMutateRowRequest( + table_name=table_name, row_key=row_key + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_check_and_mutate_row_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_v2.BigtableClient() + + # Setup request + table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + row_key = b"122" + + with pytest.raises(CustomException): + client.check_and_mutate_row(table_name, row_key) + + def test_read_modify_write_row(self): + # Setup Expected Response + expected_response = {} + expected_response = bigtable_pb2.ReadModifyWriteRowResponse(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_v2.BigtableClient() + + # Setup Request + table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + row_key = b"122" + rules = [] + + response = client.read_modify_write_row(table_name, row_key, rules) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_pb2.ReadModifyWriteRowRequest( + table_name=table_name, row_key=row_key, 
rules=rules + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_read_modify_write_row_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_v2.BigtableClient() + + # Setup request + table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + row_key = b"122" + rules = [] + + with pytest.raises(CustomException): + client.read_modify_write_row(table_name, row_key, rules) diff --git a/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py b/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py new file mode 100644 index 000000000..035b5aa00 --- /dev/null +++ b/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py @@ -0,0 +1,924 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Unit tests.""" + +import mock +import pytest + +from google.rpc import status_pb2 + +from google.cloud import bigtable_admin_v2 +from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2 +from google.cloud.bigtable_admin_v2.proto import instance_pb2 +from google.iam.v1 import iam_policy_pb2 +from google.iam.v1 import policy_pb2 +from google.longrunning import operations_pb2 +from google.protobuf import empty_pb2 +from google.protobuf import field_mask_pb2 + + +class MultiCallableStub(object): + """Stub for the grpc.UnaryUnaryMultiCallable interface.""" + + def __init__(self, method, channel_stub): + self.method = method + self.channel_stub = channel_stub + + def __call__(self, request, timeout=None, metadata=None, credentials=None): + self.channel_stub.requests.append((self.method, request)) + + response = None + if self.channel_stub.responses: + response = self.channel_stub.responses.pop() + + if isinstance(response, Exception): + raise response + + if response: + return response + + +class ChannelStub(object): + """Stub for the grpc.Channel interface.""" + + def __init__(self, responses=[]): + self.responses = responses + self.requests = [] + + def unary_unary(self, method, request_serializer=None, response_deserializer=None): + return MultiCallableStub(method, self) + + +class CustomException(Exception): + pass + + +class TestBigtableInstanceAdminClient(object): + def test_create_instance(self): + # Setup Expected Response + name = "name3373707" + display_name = "displayName1615086568" + expected_response = {"name": name, "display_name": display_name} + expected_response = instance_pb2.Instance(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_create_instance", done=True + ) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + 
create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + parent = client.project_path("[PROJECT]") + instance_id = "instanceId-2101995259" + instance = {} + clusters = {} + + response = client.create_instance(parent, instance_id, instance, clusters) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.CreateInstanceRequest( + parent=parent, instance_id=instance_id, instance=instance, clusters=clusters + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_create_instance_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_create_instance_exception", done=True + ) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + parent = client.project_path("[PROJECT]") + instance_id = "instanceId-2101995259" + instance = {} + clusters = {} + + response = client.create_instance(parent, instance_id, instance, clusters) + exception = response.exception() + assert exception.errors[0] == error + + def test_get_instance(self): + # Setup Expected Response + name_2 = "name2-1052831874" + display_name = "displayName1615086568" + expected_response = {"name": name_2, "display_name": display_name} + expected_response = instance_pb2.Instance(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = 
bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + name = client.instance_path("[PROJECT]", "[INSTANCE]") + + response = client.get_instance(name) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.GetInstanceRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_get_instance_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup request + name = client.instance_path("[PROJECT]", "[INSTANCE]") + + with pytest.raises(CustomException): + client.get_instance(name) + + def test_list_instances(self): + # Setup Expected Response + next_page_token = "nextPageToken-1530815211" + expected_response = {"next_page_token": next_page_token} + expected_response = bigtable_instance_admin_pb2.ListInstancesResponse( + **expected_response + ) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + parent = client.project_path("[PROJECT]") + + response = client.list_instances(parent) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.ListInstancesRequest( + parent=parent + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_list_instances_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as 
create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup request + parent = client.project_path("[PROJECT]") + + with pytest.raises(CustomException): + client.list_instances(parent) + + def test_update_instance(self): + # Setup Expected Response + name = "name3373707" + display_name_2 = "displayName21615000987" + expected_response = {"name": name, "display_name": display_name_2} + expected_response = instance_pb2.Instance(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + display_name = "displayName1615086568" + + response = client.update_instance(display_name) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = instance_pb2.Instance(display_name=display_name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_update_instance_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup request + display_name = "displayName1615086568" + + with pytest.raises(CustomException): + client.update_instance(display_name) + + def test_partial_update_instance(self): + # Setup Expected Response + name = "name3373707" + display_name = "displayName1615086568" + expected_response = {"name": name, "display_name": display_name} + expected_response = instance_pb2.Instance(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_partial_update_instance", done=True + ) + 
operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + instance = {} + update_mask = {} + + response = client.partial_update_instance(instance, update_mask) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.PartialUpdateInstanceRequest( + instance=instance, update_mask=update_mask + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_partial_update_instance_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_partial_update_instance_exception", done=True + ) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + instance = {} + update_mask = {} + + response = client.partial_update_instance(instance, update_mask) + exception = response.exception() + assert exception.errors[0] == error + + def test_delete_instance(self): + channel = ChannelStub() + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + name = client.instance_path("[PROJECT]", "[INSTANCE]") + + client.delete_instance(name) + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.DeleteInstanceRequest(name=name) + actual_request = 
channel.requests[0][1] + assert expected_request == actual_request + + def test_delete_instance_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup request + name = client.instance_path("[PROJECT]", "[INSTANCE]") + + with pytest.raises(CustomException): + client.delete_instance(name) + + def test_create_cluster(self): + # Setup Expected Response + name = "name3373707" + location = "location1901043637" + serve_nodes = 1288838783 + expected_response = { + "name": name, + "location": location, + "serve_nodes": serve_nodes, + } + expected_response = instance_pb2.Cluster(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_create_cluster", done=True + ) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + cluster_id = "clusterId240280960" + cluster = {} + + response = client.create_cluster(parent, cluster_id, cluster) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.CreateClusterRequest( + parent=parent, cluster_id=cluster_id, cluster=cluster + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_create_cluster_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_create_cluster_exception", done=True + ) + 
operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + cluster_id = "clusterId240280960" + cluster = {} + + response = client.create_cluster(parent, cluster_id, cluster) + exception = response.exception() + assert exception.errors[0] == error + + def test_get_cluster(self): + # Setup Expected Response + name_2 = "name2-1052831874" + location = "location1901043637" + serve_nodes = 1288838783 + expected_response = { + "name": name_2, + "location": location, + "serve_nodes": serve_nodes, + } + expected_response = instance_pb2.Cluster(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") + + response = client.get_cluster(name) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.GetClusterRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_get_cluster_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup request + name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") + + with pytest.raises(CustomException): + 
client.get_cluster(name) + + def test_list_clusters(self): + # Setup Expected Response + next_page_token = "nextPageToken-1530815211" + expected_response = {"next_page_token": next_page_token} + expected_response = bigtable_instance_admin_pb2.ListClustersResponse( + **expected_response + ) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + + response = client.list_clusters(parent) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.ListClustersRequest( + parent=parent + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_list_clusters_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + + with pytest.raises(CustomException): + client.list_clusters(parent) + + def test_update_cluster(self): + # Setup Expected Response + name = "name3373707" + location = "location1901043637" + serve_nodes_2 = 1623486220 + expected_response = { + "name": name, + "location": location, + "serve_nodes": serve_nodes_2, + } + expected_response = instance_pb2.Cluster(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_update_cluster", done=True + ) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = 
mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + serve_nodes = 1288838783 + + response = client.update_cluster(serve_nodes) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = instance_pb2.Cluster(serve_nodes=serve_nodes) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_update_cluster_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_update_cluster_exception", done=True + ) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + serve_nodes = 1288838783 + + response = client.update_cluster(serve_nodes) + exception = response.exception() + assert exception.errors[0] == error + + def test_delete_cluster(self): + channel = ChannelStub() + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") + + client.delete_cluster(name) + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.DeleteClusterRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_delete_cluster_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") 
+ with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup request + name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") + + with pytest.raises(CustomException): + client.delete_cluster(name) + + def test_create_app_profile(self): + # Setup Expected Response + name = "name3373707" + etag = "etag3123477" + description = "description-1724546052" + expected_response = {"name": name, "etag": etag, "description": description} + expected_response = instance_pb2.AppProfile(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + app_profile_id = "appProfileId1262094415" + app_profile = {} + + response = client.create_app_profile(parent, app_profile_id, app_profile) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.CreateAppProfileRequest( + parent=parent, app_profile_id=app_profile_id, app_profile=app_profile + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_create_app_profile_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + app_profile_id = "appProfileId1262094415" + app_profile = {} + + with pytest.raises(CustomException): + client.create_app_profile(parent, app_profile_id, app_profile) + + def 
test_get_app_profile(self): + # Setup Expected Response + name_2 = "name2-1052831874" + etag = "etag3123477" + description = "description-1724546052" + expected_response = {"name": name_2, "etag": etag, "description": description} + expected_response = instance_pb2.AppProfile(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + name = client.app_profile_path("[PROJECT]", "[INSTANCE]", "[APP_PROFILE]") + + response = client.get_app_profile(name) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.GetAppProfileRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_get_app_profile_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup request + name = client.app_profile_path("[PROJECT]", "[INSTANCE]", "[APP_PROFILE]") + + with pytest.raises(CustomException): + client.get_app_profile(name) + + def test_list_app_profiles(self): + # Setup Expected Response + next_page_token = "" + app_profiles_element = {} + app_profiles = [app_profiles_element] + expected_response = { + "next_page_token": next_page_token, + "app_profiles": app_profiles, + } + expected_response = bigtable_instance_admin_pb2.ListAppProfilesResponse( + **expected_response + ) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: 
+ create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + + paged_list_response = client.list_app_profiles(parent) + resources = list(paged_list_response) + assert len(resources) == 1 + + assert expected_response.app_profiles[0] == resources[0] + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.ListAppProfilesRequest( + parent=parent + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_list_app_profiles_exception(self): + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + + paged_list_response = client.list_app_profiles(parent) + with pytest.raises(CustomException): + list(paged_list_response) + + def test_update_app_profile(self): + # Setup Expected Response + name = "name3373707" + etag = "etag3123477" + description = "description-1724546052" + expected_response = {"name": name, "etag": etag, "description": description} + expected_response = instance_pb2.AppProfile(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_update_app_profile", done=True + ) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + app_profile = {} + update_mask = {} + + response = client.update_app_profile(app_profile, update_mask) + result = response.result() + assert expected_response == result 
+ + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.UpdateAppProfileRequest( + app_profile=app_profile, update_mask=update_mask + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_update_app_profile_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_update_app_profile_exception", done=True + ) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + app_profile = {} + update_mask = {} + + response = client.update_app_profile(app_profile, update_mask) + exception = response.exception() + assert exception.errors[0] == error + + def test_delete_app_profile(self): + channel = ChannelStub() + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + name = client.app_profile_path("[PROJECT]", "[INSTANCE]", "[APP_PROFILE]") + + client.delete_app_profile(name) + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.DeleteAppProfileRequest( + name=name + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_delete_app_profile_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup request + name = client.app_profile_path("[PROJECT]", "[INSTANCE]", 
"[APP_PROFILE]") + + with pytest.raises(CustomException): + client.delete_app_profile(name) + + def test_get_iam_policy(self): + # Setup Expected Response + version = 351608024 + etag = b"21" + expected_response = {"version": version, "etag": etag} + expected_response = policy_pb2.Policy(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + resource = "resource-341064690" + + response = client.get_iam_policy(resource) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = iam_policy_pb2.GetIamPolicyRequest(resource=resource) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_get_iam_policy_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup request + resource = "resource-341064690" + + with pytest.raises(CustomException): + client.get_iam_policy(resource) + + def test_set_iam_policy(self): + # Setup Expected Response + version = 351608024 + etag = b"21" + expected_response = {"version": version, "etag": etag} + expected_response = policy_pb2.Policy(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + resource = "resource-341064690" + policy = {} + + response = client.set_iam_policy(resource, 
policy) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = iam_policy_pb2.SetIamPolicyRequest( + resource=resource, policy=policy + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_set_iam_policy_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup request + resource = "resource-341064690" + policy = {} + + with pytest.raises(CustomException): + client.set_iam_policy(resource, policy) + + def test_test_iam_permissions(self): + # Setup Expected Response + expected_response = {} + expected_response = iam_policy_pb2.TestIamPermissionsResponse( + **expected_response + ) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + resource = "resource-341064690" + permissions = [] + + response = client.test_iam_permissions(resource, permissions) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = iam_policy_pb2.TestIamPermissionsRequest( + resource=resource, permissions=permissions + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_test_iam_permissions_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup request + resource = 
"resource-341064690" + permissions = [] + + with pytest.raises(CustomException): + client.test_iam_permissions(resource, permissions) diff --git a/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py b/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py new file mode 100644 index 000000000..72719a0de --- /dev/null +++ b/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py @@ -0,0 +1,1039 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Unit tests.""" + +import mock +import pytest + +from google.rpc import status_pb2 + +from google.cloud import bigtable_admin_v2 +from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2 +from google.cloud.bigtable_admin_v2.proto import table_pb2 +from google.iam.v1 import iam_policy_pb2 +from google.iam.v1 import policy_pb2 +from google.longrunning import operations_pb2 +from google.protobuf import empty_pb2 +from google.protobuf import field_mask_pb2 + + +class MultiCallableStub(object): + """Stub for the grpc.UnaryUnaryMultiCallable interface.""" + + def __init__(self, method, channel_stub): + self.method = method + self.channel_stub = channel_stub + + def __call__(self, request, timeout=None, metadata=None, credentials=None): + self.channel_stub.requests.append((self.method, request)) + + response = None + if self.channel_stub.responses: + response = self.channel_stub.responses.pop() + + if isinstance(response, Exception): + raise response + + if response: + return response + + +class ChannelStub(object): + """Stub for the grpc.Channel interface.""" + + def __init__(self, responses=[]): + self.responses = responses + self.requests = [] + + def unary_unary(self, method, request_serializer=None, response_deserializer=None): + return MultiCallableStub(method, self) + + +class CustomException(Exception): + pass + + +class TestBigtableTableAdminClient(object): + def test_create_table(self): + # Setup Expected Response + name = "name3373707" + expected_response = {"name": name} + expected_response = table_pb2.Table(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + table_id = "tableId-895419604" + table = {} + + response 
= client.create_table(parent, table_id, table) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.CreateTableRequest( + parent=parent, table_id=table_id, table=table + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_create_table_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + table_id = "tableId-895419604" + table = {} + + with pytest.raises(CustomException): + client.create_table(parent, table_id, table) + + def test_create_table_from_snapshot(self): + # Setup Expected Response + name = "name3373707" + expected_response = {"name": name} + expected_response = table_pb2.Table(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_create_table_from_snapshot", done=True + ) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + table_id = "tableId-895419604" + source_snapshot = client.snapshot_path( + "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" + ) + + response = client.create_table_from_snapshot(parent, table_id, source_snapshot) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.CreateTableFromSnapshotRequest( + parent=parent, table_id=table_id, 
source_snapshot=source_snapshot + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_create_table_from_snapshot_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_create_table_from_snapshot_exception", done=True + ) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + table_id = "tableId-895419604" + source_snapshot = client.snapshot_path( + "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" + ) + + response = client.create_table_from_snapshot(parent, table_id, source_snapshot) + exception = response.exception() + assert exception.errors[0] == error + + def test_list_tables(self): + # Setup Expected Response + next_page_token = "" + tables_element = {} + tables = [tables_element] + expected_response = {"next_page_token": next_page_token, "tables": tables} + expected_response = bigtable_table_admin_pb2.ListTablesResponse( + **expected_response + ) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + + paged_list_response = client.list_tables(parent) + resources = list(paged_list_response) + assert len(resources) == 1 + + assert expected_response.tables[0] == resources[0] + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.ListTablesRequest(parent=parent) + actual_request = 
channel.requests[0][1] + assert expected_request == actual_request + + def test_list_tables_exception(self): + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + + paged_list_response = client.list_tables(parent) + with pytest.raises(CustomException): + list(paged_list_response) + + def test_get_table(self): + # Setup Expected Response + name_2 = "name2-1052831874" + expected_response = {"name": name_2} + expected_response = table_pb2.Table(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + + response = client.get_table(name) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.GetTableRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_get_table_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + + with pytest.raises(CustomException): + client.get_table(name) + + def test_delete_table(self): + channel = ChannelStub() + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as 
create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + + client.delete_table(name) + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.DeleteTableRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_delete_table_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + + with pytest.raises(CustomException): + client.delete_table(name) + + def test_modify_column_families(self): + # Setup Expected Response + name_2 = "name2-1052831874" + expected_response = {"name": name_2} + expected_response = table_pb2.Table(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + modifications = [] + + response = client.modify_column_families(name, modifications) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.ModifyColumnFamiliesRequest( + name=name, modifications=modifications + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_modify_column_families_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = 
mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + modifications = [] + + with pytest.raises(CustomException): + client.modify_column_families(name, modifications) + + def test_drop_row_range(self): + channel = ChannelStub() + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + + client.drop_row_range(name) + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.DropRowRangeRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_drop_row_range_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + + with pytest.raises(CustomException): + client.drop_row_range(name) + + def test_generate_consistency_token(self): + # Setup Expected Response + consistency_token = "consistencyToken-1090516718" + expected_response = {"consistency_token": consistency_token} + expected_response = bigtable_table_admin_pb2.GenerateConsistencyTokenResponse( + **expected_response + ) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = 
bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + + response = client.generate_consistency_token(name) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.GenerateConsistencyTokenRequest( + name=name + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_generate_consistency_token_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + + with pytest.raises(CustomException): + client.generate_consistency_token(name) + + def test_check_consistency(self): + # Setup Expected Response + consistent = True + expected_response = {"consistent": consistent} + expected_response = bigtable_table_admin_pb2.CheckConsistencyResponse( + **expected_response + ) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + consistency_token = "consistencyToken-1090516718" + + response = client.check_consistency(name, consistency_token) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.CheckConsistencyRequest( + name=name, consistency_token=consistency_token + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_check_consistency_exception(self): + # 
Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + consistency_token = "consistencyToken-1090516718" + + with pytest.raises(CustomException): + client.check_consistency(name, consistency_token) + + def test_get_iam_policy(self): + # Setup Expected Response + version = 351608024 + etag = b"21" + expected_response = {"version": version, "etag": etag} + expected_response = policy_pb2.Policy(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + resource = "resource-341064690" + + response = client.get_iam_policy(resource) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = iam_policy_pb2.GetIamPolicyRequest(resource=resource) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_get_iam_policy_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + resource = "resource-341064690" + + with pytest.raises(CustomException): + client.get_iam_policy(resource) + + def test_set_iam_policy(self): + # Setup Expected Response + version = 351608024 + etag = b"21" + expected_response = {"version": version, "etag": etag} + expected_response = 
policy_pb2.Policy(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + resource = "resource-341064690" + policy = {} + + response = client.set_iam_policy(resource, policy) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = iam_policy_pb2.SetIamPolicyRequest( + resource=resource, policy=policy + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_set_iam_policy_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + resource = "resource-341064690" + policy = {} + + with pytest.raises(CustomException): + client.set_iam_policy(resource, policy) + + def test_test_iam_permissions(self): + # Setup Expected Response + expected_response = {} + expected_response = iam_policy_pb2.TestIamPermissionsResponse( + **expected_response + ) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + resource = "resource-341064690" + permissions = [] + + response = client.test_iam_permissions(resource, permissions) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = iam_policy_pb2.TestIamPermissionsRequest( + resource=resource, permissions=permissions + ) + actual_request = 
channel.requests[0][1] + assert expected_request == actual_request + + def test_test_iam_permissions_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + resource = "resource-341064690" + permissions = [] + + with pytest.raises(CustomException): + client.test_iam_permissions(resource, permissions) + + def test_snapshot_table(self): + # Setup Expected Response + name_2 = "name2-1052831874" + data_size_bytes = 2110122398 + description = "description-1724546052" + expected_response = { + "name": name_2, + "data_size_bytes": data_size_bytes, + "description": description, + } + expected_response = table_pb2.Snapshot(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_snapshot_table", done=True + ) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + cluster = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") + snapshot_id = "snapshotId-168585866" + + response = client.snapshot_table(name, cluster, snapshot_id) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.SnapshotTableRequest( + name=name, cluster=cluster, snapshot_id=snapshot_id + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_snapshot_table_exception(self): + # Setup Response + error = status_pb2.Status() + operation = 
operations_pb2.Operation( + name="operations/test_snapshot_table_exception", done=True + ) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + cluster = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") + snapshot_id = "snapshotId-168585866" + + response = client.snapshot_table(name, cluster, snapshot_id) + exception = response.exception() + assert exception.errors[0] == error + + def test_get_snapshot(self): + # Setup Expected Response + name_2 = "name2-1052831874" + data_size_bytes = 2110122398 + description = "description-1724546052" + expected_response = { + "name": name_2, + "data_size_bytes": data_size_bytes, + "description": description, + } + expected_response = table_pb2.Snapshot(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + name = client.snapshot_path( + "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" + ) + + response = client.get_snapshot(name) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.GetSnapshotRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_get_snapshot_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = 
channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + name = client.snapshot_path( + "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" + ) + + with pytest.raises(CustomException): + client.get_snapshot(name) + + def test_list_snapshots(self): + # Setup Expected Response + next_page_token = "" + snapshots_element = {} + snapshots = [snapshots_element] + expected_response = {"next_page_token": next_page_token, "snapshots": snapshots} + expected_response = bigtable_table_admin_pb2.ListSnapshotsResponse( + **expected_response + ) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") + + paged_list_response = client.list_snapshots(parent) + resources = list(paged_list_response) + assert len(resources) == 1 + + assert expected_response.snapshots[0] == resources[0] + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.ListSnapshotsRequest(parent=parent) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_list_snapshots_exception(self): + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") + + paged_list_response = client.list_snapshots(parent) + with pytest.raises(CustomException): + list(paged_list_response) + + def test_delete_snapshot(self): + channel = ChannelStub() + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + 
create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + name = client.snapshot_path( + "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" + ) + + client.delete_snapshot(name) + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.DeleteSnapshotRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_delete_snapshot_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + name = client.snapshot_path( + "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" + ) + + with pytest.raises(CustomException): + client.delete_snapshot(name) + + def test_create_backup(self): + # Setup Expected Response + name = "name3373707" + source_table = "sourceTable1670858410" + size_bytes = 1796325715 + expected_response = { + "name": name, + "source_table": source_table, + "size_bytes": size_bytes, + } + expected_response = table_pb2.Backup(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_create_backup", done=True + ) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") + backup_id = "backupId1355353272" + backup = {} + + response = client.create_backup(parent, backup_id, backup) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = 
bigtable_table_admin_pb2.CreateBackupRequest( + parent=parent, backup_id=backup_id, backup=backup + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_create_backup_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_create_backup_exception", done=True + ) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") + backup_id = "backupId1355353272" + backup = {} + + response = client.create_backup(parent, backup_id, backup) + exception = response.exception() + assert exception.errors[0] == error + + def test_get_backup(self): + # Setup Expected Response + name_2 = "name2-1052831874" + source_table = "sourceTable1670858410" + size_bytes = 1796325715 + expected_response = { + "name": name_2, + "source_table": source_table, + "size_bytes": size_bytes, + } + expected_response = table_pb2.Backup(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + name = client.backup_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[BACKUP]") + + response = client.get_backup(name) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.GetBackupRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_get_backup_exception(self): + # Mock the 
API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + name = client.backup_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[BACKUP]") + + with pytest.raises(CustomException): + client.get_backup(name) + + def test_list_backups(self): + # Setup Expected Response + next_page_token = "" + backups_element = {} + backups = [backups_element] + expected_response = {"next_page_token": next_page_token, "backups": backups} + expected_response = bigtable_table_admin_pb2.ListBackupsResponse( + **expected_response + ) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") + + paged_list_response = client.list_backups(parent) + resources = list(paged_list_response) + assert len(resources) == 1 + + assert expected_response.backups[0] == resources[0] + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.ListBackupsRequest(parent=parent) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_list_backups_exception(self): + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") + + paged_list_response = client.list_backups(parent) + with pytest.raises(CustomException): + list(paged_list_response) 
+ + def test_update_backup(self): + # Setup Expected Response + name = "name3373707" + source_table = "sourceTable1670858410" + size_bytes = 1796325715 + expected_response = { + "name": name, + "source_table": source_table, + "size_bytes": size_bytes, + } + expected_response = table_pb2.Backup(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + backup = {} + update_mask = {} + + response = client.update_backup(backup, update_mask) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.UpdateBackupRequest( + backup=backup, update_mask=update_mask + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_update_backup_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + backup = {} + update_mask = {} + + with pytest.raises(CustomException): + client.update_backup(backup, update_mask) + + def test_delete_backup(self): + channel = ChannelStub() + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + name = client.backup_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[BACKUP]") + + client.delete_backup(name) + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.DeleteBackupRequest(name=name) + actual_request = channel.requests[0][1] + assert 
expected_request == actual_request + + def test_delete_backup_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + name = client.backup_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[BACKUP]") + + with pytest.raises(CustomException): + client.delete_backup(name) + + def test_restore_table(self): + # Setup Expected Response + name = "name3373707" + expected_response = {"name": name} + expected_response = table_pb2.Table(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_restore_table", done=True + ) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + response = client.restore_table() + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.RestoreTableRequest() + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_restore_table_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_restore_table_exception", done=True + ) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + response = client.restore_table() + exception = response.exception() + assert 
exception.errors[0] == error From dca84fda96a0c272fe7a12e30514d176d9913b50 Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Sat, 13 Feb 2021 06:05:17 -0800 Subject: [PATCH 05/11] chore: update grpc dependency to v1.33.1 PiperOrigin-RevId: 338646463 Source-Author: Google APIs Source-Date: Fri Oct 23 03:57:15 2020 -0700 Source-Repo: googleapis/googleapis Source-Sha: 20b11dfe4538cd5da7b4c3dd7d2bf5b9922ff3ed Source-Link: https://github.com/googleapis/googleapis/commit/20b11dfe4538cd5da7b4c3dd7d2bf5b9922ff3ed --- .../proto/bigtable_instance_admin_pb2_grpc.py | 38 ++++++++++++++++ .../proto/bigtable_table_admin_pb2_grpc.py | 44 +++++++++++++++++++ .../bigtable_v2/proto/bigtable_pb2_grpc.py | 12 +++++ synth.metadata | 3 +- 4 files changed, 96 insertions(+), 1 deletion(-) diff --git a/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py b/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py index 8b1395579..8b736d31d 100644 --- a/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py +++ b/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py @@ -388,6 +388,7 @@ def CreateInstance( options=(), channel_credentials=None, call_credentials=None, + insecure=False, compression=None, wait_for_ready=None, timeout=None, @@ -401,6 +402,7 @@ def CreateInstance( google_dot_longrunning_dot_operations__pb2.Operation.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, @@ -415,6 +417,7 @@ def GetInstance( options=(), channel_credentials=None, call_credentials=None, + insecure=False, compression=None, wait_for_ready=None, timeout=None, @@ -428,6 +431,7 @@ def GetInstance( google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Instance.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, @@ -442,6 +446,7 @@ def ListInstances( options=(), channel_credentials=None, call_credentials=None, + 
insecure=False, compression=None, wait_for_ready=None, timeout=None, @@ -455,6 +460,7 @@ def ListInstances( google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesResponse.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, @@ -469,6 +475,7 @@ def UpdateInstance( options=(), channel_credentials=None, call_credentials=None, + insecure=False, compression=None, wait_for_ready=None, timeout=None, @@ -482,6 +489,7 @@ def UpdateInstance( google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Instance.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, @@ -496,6 +504,7 @@ def PartialUpdateInstance( options=(), channel_credentials=None, call_credentials=None, + insecure=False, compression=None, wait_for_ready=None, timeout=None, @@ -509,6 +518,7 @@ def PartialUpdateInstance( google_dot_longrunning_dot_operations__pb2.Operation.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, @@ -523,6 +533,7 @@ def DeleteInstance( options=(), channel_credentials=None, call_credentials=None, + insecure=False, compression=None, wait_for_ready=None, timeout=None, @@ -536,6 +547,7 @@ def DeleteInstance( google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, @@ -550,6 +562,7 @@ def CreateCluster( options=(), channel_credentials=None, call_credentials=None, + insecure=False, compression=None, wait_for_ready=None, timeout=None, @@ -563,6 +576,7 @@ def CreateCluster( google_dot_longrunning_dot_operations__pb2.Operation.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, @@ -577,6 +591,7 @@ def GetCluster( options=(), channel_credentials=None, call_credentials=None, + insecure=False, compression=None, wait_for_ready=None, timeout=None, @@ -590,6 +605,7 @@ def 
GetCluster( google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Cluster.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, @@ -604,6 +620,7 @@ def ListClusters( options=(), channel_credentials=None, call_credentials=None, + insecure=False, compression=None, wait_for_ready=None, timeout=None, @@ -617,6 +634,7 @@ def ListClusters( google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersResponse.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, @@ -631,6 +649,7 @@ def UpdateCluster( options=(), channel_credentials=None, call_credentials=None, + insecure=False, compression=None, wait_for_ready=None, timeout=None, @@ -644,6 +663,7 @@ def UpdateCluster( google_dot_longrunning_dot_operations__pb2.Operation.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, @@ -658,6 +678,7 @@ def DeleteCluster( options=(), channel_credentials=None, call_credentials=None, + insecure=False, compression=None, wait_for_ready=None, timeout=None, @@ -671,6 +692,7 @@ def DeleteCluster( google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, @@ -685,6 +707,7 @@ def CreateAppProfile( options=(), channel_credentials=None, call_credentials=None, + insecure=False, compression=None, wait_for_ready=None, timeout=None, @@ -698,6 +721,7 @@ def CreateAppProfile( google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.AppProfile.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, @@ -712,6 +736,7 @@ def GetAppProfile( options=(), channel_credentials=None, call_credentials=None, + insecure=False, compression=None, wait_for_ready=None, timeout=None, @@ -725,6 +750,7 @@ def GetAppProfile( 
google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.AppProfile.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, @@ -739,6 +765,7 @@ def ListAppProfiles( options=(), channel_credentials=None, call_credentials=None, + insecure=False, compression=None, wait_for_ready=None, timeout=None, @@ -752,6 +779,7 @@ def ListAppProfiles( google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesResponse.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, @@ -766,6 +794,7 @@ def UpdateAppProfile( options=(), channel_credentials=None, call_credentials=None, + insecure=False, compression=None, wait_for_ready=None, timeout=None, @@ -779,6 +808,7 @@ def UpdateAppProfile( google_dot_longrunning_dot_operations__pb2.Operation.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, @@ -793,6 +823,7 @@ def DeleteAppProfile( options=(), channel_credentials=None, call_credentials=None, + insecure=False, compression=None, wait_for_ready=None, timeout=None, @@ -806,6 +837,7 @@ def DeleteAppProfile( google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, @@ -820,6 +852,7 @@ def GetIamPolicy( options=(), channel_credentials=None, call_credentials=None, + insecure=False, compression=None, wait_for_ready=None, timeout=None, @@ -833,6 +866,7 @@ def GetIamPolicy( google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, @@ -847,6 +881,7 @@ def SetIamPolicy( options=(), channel_credentials=None, call_credentials=None, + insecure=False, compression=None, wait_for_ready=None, timeout=None, @@ -860,6 +895,7 @@ def SetIamPolicy( google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, options, channel_credentials, + insecure, 
call_credentials, compression, wait_for_ready, @@ -874,6 +910,7 @@ def TestIamPermissions( options=(), channel_credentials=None, call_credentials=None, + insecure=False, compression=None, wait_for_ready=None, timeout=None, @@ -887,6 +924,7 @@ def TestIamPermissions( google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, diff --git a/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py b/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py index 2b8d46e20..3c9fb609d 100644 --- a/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py +++ b/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py @@ -502,6 +502,7 @@ def CreateTable( options=(), channel_credentials=None, call_credentials=None, + insecure=False, compression=None, wait_for_ready=None, timeout=None, @@ -515,6 +516,7 @@ def CreateTable( google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, @@ -529,6 +531,7 @@ def CreateTableFromSnapshot( options=(), channel_credentials=None, call_credentials=None, + insecure=False, compression=None, wait_for_ready=None, timeout=None, @@ -542,6 +545,7 @@ def CreateTableFromSnapshot( google_dot_longrunning_dot_operations__pb2.Operation.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, @@ -556,6 +560,7 @@ def ListTables( options=(), channel_credentials=None, call_credentials=None, + insecure=False, compression=None, wait_for_ready=None, timeout=None, @@ -569,6 +574,7 @@ def ListTables( google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesResponse.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, @@ -583,6 +589,7 @@ def GetTable( options=(), 
channel_credentials=None, call_credentials=None, + insecure=False, compression=None, wait_for_ready=None, timeout=None, @@ -596,6 +603,7 @@ def GetTable( google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, @@ -610,6 +618,7 @@ def DeleteTable( options=(), channel_credentials=None, call_credentials=None, + insecure=False, compression=None, wait_for_ready=None, timeout=None, @@ -623,6 +632,7 @@ def DeleteTable( google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, @@ -637,6 +647,7 @@ def ModifyColumnFamilies( options=(), channel_credentials=None, call_credentials=None, + insecure=False, compression=None, wait_for_ready=None, timeout=None, @@ -650,6 +661,7 @@ def ModifyColumnFamilies( google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, @@ -664,6 +676,7 @@ def DropRowRange( options=(), channel_credentials=None, call_credentials=None, + insecure=False, compression=None, wait_for_ready=None, timeout=None, @@ -677,6 +690,7 @@ def DropRowRange( google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, @@ -691,6 +705,7 @@ def GenerateConsistencyToken( options=(), channel_credentials=None, call_credentials=None, + insecure=False, compression=None, wait_for_ready=None, timeout=None, @@ -704,6 +719,7 @@ def GenerateConsistencyToken( google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenResponse.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, @@ -718,6 +734,7 @@ def CheckConsistency( options=(), channel_credentials=None, call_credentials=None, + insecure=False, 
compression=None, wait_for_ready=None, timeout=None, @@ -731,6 +748,7 @@ def CheckConsistency( google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyResponse.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, @@ -745,6 +763,7 @@ def SnapshotTable( options=(), channel_credentials=None, call_credentials=None, + insecure=False, compression=None, wait_for_ready=None, timeout=None, @@ -758,6 +777,7 @@ def SnapshotTable( google_dot_longrunning_dot_operations__pb2.Operation.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, @@ -772,6 +792,7 @@ def GetSnapshot( options=(), channel_credentials=None, call_credentials=None, + insecure=False, compression=None, wait_for_ready=None, timeout=None, @@ -785,6 +806,7 @@ def GetSnapshot( google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Snapshot.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, @@ -799,6 +821,7 @@ def ListSnapshots( options=(), channel_credentials=None, call_credentials=None, + insecure=False, compression=None, wait_for_ready=None, timeout=None, @@ -812,6 +835,7 @@ def ListSnapshots( google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsResponse.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, @@ -826,6 +850,7 @@ def DeleteSnapshot( options=(), channel_credentials=None, call_credentials=None, + insecure=False, compression=None, wait_for_ready=None, timeout=None, @@ -839,6 +864,7 @@ def DeleteSnapshot( google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, @@ -853,6 +879,7 @@ def CreateBackup( options=(), channel_credentials=None, call_credentials=None, + insecure=False, compression=None, wait_for_ready=None, timeout=None, @@ -866,6 
+893,7 @@ def CreateBackup( google_dot_longrunning_dot_operations__pb2.Operation.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, @@ -880,6 +908,7 @@ def GetBackup( options=(), channel_credentials=None, call_credentials=None, + insecure=False, compression=None, wait_for_ready=None, timeout=None, @@ -893,6 +922,7 @@ def GetBackup( google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Backup.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, @@ -907,6 +937,7 @@ def UpdateBackup( options=(), channel_credentials=None, call_credentials=None, + insecure=False, compression=None, wait_for_ready=None, timeout=None, @@ -920,6 +951,7 @@ def UpdateBackup( google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Backup.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, @@ -934,6 +966,7 @@ def DeleteBackup( options=(), channel_credentials=None, call_credentials=None, + insecure=False, compression=None, wait_for_ready=None, timeout=None, @@ -947,6 +980,7 @@ def DeleteBackup( google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, @@ -961,6 +995,7 @@ def ListBackups( options=(), channel_credentials=None, call_credentials=None, + insecure=False, compression=None, wait_for_ready=None, timeout=None, @@ -974,6 +1009,7 @@ def ListBackups( google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListBackupsResponse.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, @@ -988,6 +1024,7 @@ def RestoreTable( options=(), channel_credentials=None, call_credentials=None, + insecure=False, compression=None, wait_for_ready=None, timeout=None, @@ -1001,6 +1038,7 @@ def RestoreTable( google_dot_longrunning_dot_operations__pb2.Operation.FromString, options, 
channel_credentials, + insecure, call_credentials, compression, wait_for_ready, @@ -1015,6 +1053,7 @@ def GetIamPolicy( options=(), channel_credentials=None, call_credentials=None, + insecure=False, compression=None, wait_for_ready=None, timeout=None, @@ -1028,6 +1067,7 @@ def GetIamPolicy( google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, @@ -1042,6 +1082,7 @@ def SetIamPolicy( options=(), channel_credentials=None, call_credentials=None, + insecure=False, compression=None, wait_for_ready=None, timeout=None, @@ -1055,6 +1096,7 @@ def SetIamPolicy( google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, @@ -1069,6 +1111,7 @@ def TestIamPermissions( options=(), channel_credentials=None, call_credentials=None, + insecure=False, compression=None, wait_for_ready=None, timeout=None, @@ -1082,6 +1125,7 @@ def TestIamPermissions( google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, diff --git a/google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py b/google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py index db4ee99f3..a4f25dcb0 100644 --- a/google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py +++ b/google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py @@ -161,6 +161,7 @@ def ReadRows( options=(), channel_credentials=None, call_credentials=None, + insecure=False, compression=None, wait_for_ready=None, timeout=None, @@ -174,6 +175,7 @@ def ReadRows( google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsResponse.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, @@ -188,6 +190,7 @@ def SampleRowKeys( options=(), channel_credentials=None, call_credentials=None, + insecure=False, compression=None, wait_for_ready=None, 
timeout=None, @@ -201,6 +204,7 @@ def SampleRowKeys( google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.SampleRowKeysResponse.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, @@ -215,6 +219,7 @@ def MutateRow( options=(), channel_credentials=None, call_credentials=None, + insecure=False, compression=None, wait_for_ready=None, timeout=None, @@ -228,6 +233,7 @@ def MutateRow( google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowResponse.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, @@ -242,6 +248,7 @@ def MutateRows( options=(), channel_credentials=None, call_credentials=None, + insecure=False, compression=None, wait_for_ready=None, timeout=None, @@ -255,6 +262,7 @@ def MutateRows( google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowsResponse.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, @@ -269,6 +277,7 @@ def CheckAndMutateRow( options=(), channel_credentials=None, call_credentials=None, + insecure=False, compression=None, wait_for_ready=None, timeout=None, @@ -282,6 +291,7 @@ def CheckAndMutateRow( google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.CheckAndMutateRowResponse.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, @@ -296,6 +306,7 @@ def ReadModifyWriteRow( options=(), channel_credentials=None, call_credentials=None, + insecure=False, compression=None, wait_for_ready=None, timeout=None, @@ -309,6 +320,7 @@ def ReadModifyWriteRow( google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadModifyWriteRowResponse.FromString, options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, diff --git a/synth.metadata b/synth.metadata index 742512874..37252488b 100644 --- a/synth.metadata +++ b/synth.metadata @@ -11,7 +11,8 @@ "git": { "name": "googleapis", 
"remote": "https://github.com/googleapis/googleapis.git", - "sha": "121ec62ffda4a20e726631be34d76ad35373ca15" + "sha": "20b11dfe4538cd5da7b4c3dd7d2bf5b9922ff3ed", + "internalRef": "338646463" } }, { From 976766cc60b446065230fae8cedbadaec5b93a4e Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Sat, 13 Feb 2021 06:07:29 -0800 Subject: [PATCH 06/11] feat:Update BigtableTableAdmin GetIamPolicy to include the additional binding for Backup. feat:Change DeleteAppProfileRequest.ignore_warnings to REQUIRED. PiperOrigin-RevId: 339464550 Source-Author: Google APIs Source-Date: Wed Oct 28 08:32:48 2020 -0700 Source-Repo: googleapis/googleapis Source-Sha: ccd6462d31e6422fd188b6590aa8d0ad03e7d9a3 Source-Link: https://github.com/googleapis/googleapis/commit/ccd6462d31e6422fd188b6590aa8d0ad03e7d9a3 --- .../gapic/bigtable_instance_admin_client.py | 9 +- .../gapic/bigtable_table_admin_client.py | 25 +- .../bigtable_table_admin_grpc_transport.py | 4 +- .../proto/bigtable_instance_admin.proto | 11 +- .../proto/bigtable_instance_admin_pb2.py | 18 +- .../proto/bigtable_table_admin.proto | 327 ++--- .../proto/bigtable_table_admin_pb2.py | 1169 +++++++++-------- .../proto/bigtable_table_admin_pb2_grpc.py | 16 +- synth.metadata | 4 +- .../test_bigtable_instance_admin_client_v2.py | 8 +- .../v2/test_bigtable_table_admin_client_v2.py | 16 +- 11 files changed, 821 insertions(+), 786 deletions(-) diff --git a/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py b/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py index 27586d140..bdb4e2f28 100644 --- a/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py +++ b/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py @@ -1577,7 +1577,7 @@ def update_app_profile( def delete_app_profile( self, name, - ignore_warnings=None, + ignore_warnings, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, @@ -1592,13 
+1592,16 @@ def delete_app_profile( >>> >>> name = client.app_profile_path('[PROJECT]', '[INSTANCE]', '[APP_PROFILE]') >>> - >>> client.delete_app_profile(name) + >>> # TODO: Initialize `ignore_warnings`: + >>> ignore_warnings = False + >>> + >>> client.delete_app_profile(name, ignore_warnings) Args: name (str): Required. The unique name of the app profile to be deleted. Values are of the form ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``. - ignore_warnings (bool): If true, ignore safety checks when deleting the app profile. + ignore_warnings (bool): Required. If true, ignore safety checks when deleting the app profile. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. diff --git a/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py b/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py index db5528e9f..8ef4a22f7 100644 --- a/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py +++ b/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py @@ -760,10 +760,10 @@ def modify_column_families( name (str): Required. The unique name of the table whose families should be modified. Values are of the form ``projects/{project}/instances/{instance}/tables/{table}``. - modifications (list[Union[dict, ~google.cloud.bigtable_admin_v2.types.Modification]]): Required. Modifications to be atomically applied to the specified table's - families. Entries are applied in order, meaning that earlier modifications - can be masked by later ones (in the case of repeated updates to the same - family, for example). + modifications (list[Union[dict, ~google.cloud.bigtable_admin_v2.types.Modification]]): Required. Modifications to be atomically applied to the specified table's families. 
+ Entries are applied in order, meaning that earlier modifications can be + masked by later ones (in the case of repeated updates to the same family, + for example). If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_admin_v2.types.Modification` @@ -1068,7 +1068,7 @@ def get_iam_policy( metadata=None, ): """ - Gets the access control policy for a resource. + Gets the access control policy for a Table or Backup resource. Returns an empty policy if the resource exists but does not have a policy set. @@ -1232,7 +1232,7 @@ def test_iam_permissions( metadata=None, ): """ - Returns permissions that the caller has on the specified table resource. + Returns permissions that the caller has on the specified Table or Backup resource. Example: >>> from google.cloud import bigtable_admin_v2 @@ -1916,7 +1916,7 @@ def list_backups( expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be <, >, <=, >=, !=, - =, or :. Colon ‘:’ represents a HAS operator which is roughly synonymous + =, or :. Colon ':' represents a HAS operator which is roughly synonymous with equality. Filter rules are case insensitive. The fields eligible for filtering are: @@ -2190,8 +2190,8 @@ def delete_backup( def restore_table( self, - parent=None, - table_id=None, + parent, + table_id, backup=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, @@ -2210,7 +2210,12 @@ def restore_table( >>> >>> client = bigtable_admin_v2.BigtableTableAdminClient() >>> - >>> response = client.restore_table() + >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') + >>> + >>> # TODO: Initialize `table_id`: + >>> table_id = '' + >>> + >>> response = client.restore_table(parent, table_id) >>> >>> def callback(operation_future): ... # Handle result. 
diff --git a/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py b/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py index 281bad20a..c732bca97 100644 --- a/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py +++ b/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py @@ -267,7 +267,7 @@ def check_consistency(self): def get_iam_policy(self): """Return the gRPC stub for :meth:`BigtableTableAdminClient.get_iam_policy`. - Gets the access control policy for a resource. + Gets the access control policy for a Table or Backup resource. Returns an empty policy if the resource exists but does not have a policy set. @@ -296,7 +296,7 @@ def set_iam_policy(self): def test_iam_permissions(self): """Return the gRPC stub for :meth:`BigtableTableAdminClient.test_iam_permissions`. - Returns permissions that the caller has on the specified table resource. + Returns permissions that the caller has on the specified Table or Backup resource. Returns: Callable: A callable which accepts the appropriate diff --git a/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto b/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto index 8b19b5582..ca3aaed7a 100644 --- a/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto +++ b/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-// syntax = "proto3"; @@ -565,9 +564,11 @@ message DeleteAppProfileRequest { } ]; - // If true, ignore safety checks when deleting the app profile. - bool ignore_warnings = 2; + // Required. If true, ignore safety checks when deleting the app profile. + bool ignore_warnings = 2 [(google.api.field_behavior) = REQUIRED]; } // The metadata for the Operation returned by UpdateAppProfile. -message UpdateAppProfileMetadata {} +message UpdateAppProfileMetadata { + +} diff --git a/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py b/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py index 63590907a..3f4c9e3cc 100644 --- a/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py +++ b/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py @@ -35,7 +35,7 @@ syntax="proto3", serialized_options=b'\n\034com.google.bigtable.admin.v2B\032BigtableInstanceAdminProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2\352\002"Google::Cloud::Bigtable::Admin::V2', create_key=_descriptor._internal_create_key, - serialized_pb=b'\nBgoogle/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x33google/cloud/bigtable_admin_v2/proto/instance.proto\x1a\x1egoogle/iam/v1/iam_policy.proto\x1a\x1agoogle/iam/v1/policy.proto\x1a#google/longrunning/operations.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xdb\x02\n\x15\x43reateInstanceRequest\x12\x43\n\x06parent\x18\x01 \x01(\tB3\xe0\x41\x02\xfa\x41-\n+cloudresourcemanager.googleapis.com/Project\x12\x18\n\x0binstance_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x39\n\x08instance\x18\x03 
\x01(\x0b\x32".google.bigtable.admin.v2.InstanceB\x03\xe0\x41\x02\x12T\n\x08\x63lusters\x18\x04 \x03(\x0b\x32=.google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntryB\x03\xe0\x41\x02\x1aR\n\rClustersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x30\n\x05value\x18\x02 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster:\x02\x38\x01"L\n\x12GetInstanceRequest\x12\x36\n\x04name\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance"o\n\x14ListInstancesRequest\x12\x43\n\x06parent\x18\x01 \x01(\tB3\xe0\x41\x02\xfa\x41-\n+cloudresourcemanager.googleapis.com/Project\x12\x12\n\npage_token\x18\x02 \x01(\t"\x81\x01\n\x15ListInstancesResponse\x12\x35\n\tinstances\x18\x01 \x03(\x0b\x32".google.bigtable.admin.v2.Instance\x12\x18\n\x10\x66\x61iled_locations\x18\x02 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t"\x8f\x01\n\x1cPartialUpdateInstanceRequest\x12\x39\n\x08instance\x18\x01 \x01(\x0b\x32".google.bigtable.admin.v2.InstanceB\x03\xe0\x41\x02\x12\x34\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02"O\n\x15\x44\x65leteInstanceRequest\x12\x36\n\x04name\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance"\xa2\x01\n\x14\x43reateClusterRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x17\n\ncluster_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x37\n\x07\x63luster\x18\x03 \x01(\x0b\x32!.google.bigtable.admin.v2.ClusterB\x03\xe0\x41\x02"J\n\x11GetClusterRequest\x12\x35\n\x04name\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster"c\n\x13ListClustersRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x12\n\npage_token\x18\x02 \x01(\t"~\n\x14ListClustersResponse\x12\x33\n\x08\x63lusters\x18\x01 \x03(\x0b\x32!.google.bigtable.admin.v2.Cluster\x12\x18\n\x10\x66\x61iled_locations\x18\x02 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x03 
\x01(\t"M\n\x14\x44\x65leteClusterRequest\x12\x35\n\x04name\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster"\xc6\x01\n\x16\x43reateInstanceMetadata\x12I\n\x10original_request\x18\x01 \x01(\x0b\x32/.google.bigtable.admin.v2.CreateInstanceRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xcd\x01\n\x16UpdateInstanceMetadata\x12P\n\x10original_request\x18\x01 \x01(\x0b\x32\x36.google.bigtable.admin.v2.PartialUpdateInstanceRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xc4\x01\n\x15\x43reateClusterMetadata\x12H\n\x10original_request\x18\x01 \x01(\x0b\x32..google.bigtable.admin.v2.CreateClusterRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xb7\x01\n\x15UpdateClusterMetadata\x12;\n\x10original_request\x18\x01 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xc9\x01\n\x17\x43reateAppProfileRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x1b\n\x0e\x61pp_profile_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12>\n\x0b\x61pp_profile\x18\x03 \x01(\x0b\x32$.google.bigtable.admin.v2.AppProfileB\x03\xe0\x41\x02\x12\x17\n\x0fignore_warnings\x18\x04 \x01(\x08"P\n\x14GetAppProfileRequest\x12\x38\n\x04name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n"bigtable.googleapis.com/AppProfile"y\n\x16ListAppProfilesRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x11\n\tpage_size\x18\x03 \x01(\x05\x12\x12\n\npage_token\x18\x02 
\x01(\t"\x88\x01\n\x17ListAppProfilesResponse\x12:\n\x0c\x61pp_profiles\x18\x01 \x03(\x0b\x32$.google.bigtable.admin.v2.AppProfile\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\x12\x18\n\x10\x66\x61iled_locations\x18\x03 \x03(\t"\xa8\x01\n\x17UpdateAppProfileRequest\x12>\n\x0b\x61pp_profile\x18\x01 \x01(\x0b\x32$.google.bigtable.admin.v2.AppProfileB\x03\xe0\x41\x02\x12\x34\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02\x12\x17\n\x0fignore_warnings\x18\x03 \x01(\x08"l\n\x17\x44\x65leteAppProfileRequest\x12\x38\n\x04name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n"bigtable.googleapis.com/AppProfile\x12\x17\n\x0fignore_warnings\x18\x02 \x01(\x08"\x1a\n\x18UpdateAppProfileMetadata2\x92\x1e\n\x15\x42igtableInstanceAdmin\x12\xda\x01\n\x0e\x43reateInstance\x12/.google.bigtable.admin.v2.CreateInstanceRequest\x1a\x1d.google.longrunning.Operation"x\x82\xd3\xe4\x93\x02&"!/v2/{parent=projects/*}/instances:\x01*\xda\x41$parent,instance_id,instance,clusters\xca\x41"\n\x08Instance\x12\x16\x43reateInstanceMetadata\x12\x91\x01\n\x0bGetInstance\x12,.google.bigtable.admin.v2.GetInstanceRequest\x1a".google.bigtable.admin.v2.Instance"0\x82\xd3\xe4\x93\x02#\x12!/v2/{name=projects/*/instances/*}\xda\x41\x04name\x12\xa4\x01\n\rListInstances\x12..google.bigtable.admin.v2.ListInstancesRequest\x1a/.google.bigtable.admin.v2.ListInstancesResponse"2\x82\xd3\xe4\x93\x02#\x12!/v2/{parent=projects/*}/instances\xda\x41\x06parent\x12\x86\x01\n\x0eUpdateInstance\x12".google.bigtable.admin.v2.Instance\x1a".google.bigtable.admin.v2.Instance",\x82\xd3\xe4\x93\x02&\x1a!/v2/{name=projects/*/instances/*}:\x01*\x12\xe8\x01\n\x15PartialUpdateInstance\x12\x36.google.bigtable.admin.v2.PartialUpdateInstanceRequest\x1a\x1d.google.longrunning.Operation"x\x82\xd3\xe4\x93\x02\x36\x32*/v2/{instance.name=projects/*/instances/*}:\x08instance\xda\x41\x14instance,update_mask\xca\x41"\n\x08Instance\x12\x16UpdateInstanceMetadata\x12\x8b\x01\n\x0e\x44\x65leteInstance\x12/.google.bigtable.a
dmin.v2.DeleteInstanceRequest\x1a\x16.google.protobuf.Empty"0\x82\xd3\xe4\x93\x02#*!/v2/{name=projects/*/instances/*}\xda\x41\x04name\x12\xdc\x01\n\rCreateCluster\x12..google.bigtable.admin.v2.CreateClusterRequest\x1a\x1d.google.longrunning.Operation"|\x82\xd3\xe4\x93\x02\x37",/v2/{parent=projects/*/instances/*}/clusters:\x07\x63luster\xda\x41\x19parent,cluster_id,cluster\xca\x41 \n\x07\x43luster\x12\x15\x43reateClusterMetadata\x12\x99\x01\n\nGetCluster\x12+.google.bigtable.admin.v2.GetClusterRequest\x1a!.google.bigtable.admin.v2.Cluster";\x82\xd3\xe4\x93\x02.\x12,/v2/{name=projects/*/instances/*/clusters/*}\xda\x41\x04name\x12\xac\x01\n\x0cListClusters\x12-.google.bigtable.admin.v2.ListClustersRequest\x1a..google.bigtable.admin.v2.ListClustersResponse"=\x82\xd3\xe4\x93\x02.\x12,/v2/{parent=projects/*/instances/*}/clusters\xda\x41\x06parent\x12\xad\x01\n\rUpdateCluster\x12!.google.bigtable.admin.v2.Cluster\x1a\x1d.google.longrunning.Operation"Z\x82\xd3\xe4\x93\x02\x31\x1a,/v2/{name=projects/*/instances/*/clusters/*}:\x01*\xca\x41 
\n\x07\x43luster\x12\x15UpdateClusterMetadata\x12\x94\x01\n\rDeleteCluster\x12..google.bigtable.admin.v2.DeleteClusterRequest\x1a\x16.google.protobuf.Empty";\x82\xd3\xe4\x93\x02.*,/v2/{name=projects/*/instances/*/clusters/*}\xda\x41\x04name\x12\xd5\x01\n\x10\x43reateAppProfile\x12\x31.google.bigtable.admin.v2.CreateAppProfileRequest\x1a$.google.bigtable.admin.v2.AppProfile"h\x82\xd3\xe4\x93\x02>"//v2/{parent=projects/*/instances/*}/appProfiles:\x0b\x61pp_profile\xda\x41!parent,app_profile_id,app_profile\x12\xa5\x01\n\rGetAppProfile\x12..google.bigtable.admin.v2.GetAppProfileRequest\x1a$.google.bigtable.admin.v2.AppProfile">\x82\xd3\xe4\x93\x02\x31\x12//v2/{name=projects/*/instances/*/appProfiles/*}\xda\x41\x04name\x12\xb8\x01\n\x0fListAppProfiles\x12\x30.google.bigtable.admin.v2.ListAppProfilesRequest\x1a\x31.google.bigtable.admin.v2.ListAppProfilesResponse"@\x82\xd3\xe4\x93\x02\x31\x12//v2/{parent=projects/*/instances/*}/appProfiles\xda\x41\x06parent\x12\xfa\x01\n\x10UpdateAppProfile\x12\x31.google.bigtable.admin.v2.UpdateAppProfileRequest\x1a\x1d.google.longrunning.Operation"\x93\x01\x82\xd3\xe4\x93\x02J2;/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}:\x0b\x61pp_profile\xda\x41\x17\x61pp_profile,update_mask\xca\x41&\n\nAppProfile\x12\x18UpdateAppProfileMetadata\x12\x9d\x01\n\x10\x44\x65leteAppProfile\x12\x31.google.bigtable.admin.v2.DeleteAppProfileRequest\x1a\x16.google.protobuf.Empty">\x82\xd3\xe4\x93\x02\x31*//v2/{name=projects/*/instances/*/appProfiles/*}\xda\x41\x04name\x12\x93\x01\n\x0cGetIamPolicy\x12".google.iam.v1.GetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"H\x82\xd3\xe4\x93\x02\x37"2/v2/{resource=projects/*/instances/*}:getIamPolicy:\x01*\xda\x41\x08resource\x12\x9a\x01\n\x0cSetIamPolicy\x12".google.iam.v1.SetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"O\x82\xd3\xe4\x93\x02\x37"2/v2/{resource=projects/*/instances/*}:setIamPolicy:\x01*\xda\x41\x0fresource,policy\x12\xc5\x01\n\x12TestIamPermissions\x12(.google.iam.v1.TestIamPermissionsR
equest\x1a).google.iam.v1.TestIamPermissionsResponse"Z\x82\xd3\xe4\x93\x02="8/v2/{resource=projects/*/instances/*}:testIamPermissions:\x01*\xda\x41\x14resource,permissions\x1a\x9a\x03\xca\x41\x1c\x62igtableadmin.googleapis.com\xd2\x41\xf7\x02https://www.googleapis.com/auth/bigtable.admin,https://www.googleapis.com/auth/bigtable.admin.cluster,https://www.googleapis.com/auth/bigtable.admin.instance,https://www.googleapis.com/auth/cloud-bigtable.admin,https://www.googleapis.com/auth/cloud-bigtable.admin.cluster,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-onlyB\xe2\x01\n\x1c\x63om.google.bigtable.admin.v2B\x1a\x42igtableInstanceAdminProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2\xea\x02"Google::Cloud::Bigtable::Admin::V2b\x06proto3', + serialized_pb=b'\nBgoogle/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x33google/cloud/bigtable_admin_v2/proto/instance.proto\x1a\x1egoogle/iam/v1/iam_policy.proto\x1a\x1agoogle/iam/v1/policy.proto\x1a#google/longrunning/operations.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xdb\x02\n\x15\x43reateInstanceRequest\x12\x43\n\x06parent\x18\x01 \x01(\tB3\xe0\x41\x02\xfa\x41-\n+cloudresourcemanager.googleapis.com/Project\x12\x18\n\x0binstance_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x39\n\x08instance\x18\x03 \x01(\x0b\x32".google.bigtable.admin.v2.InstanceB\x03\xe0\x41\x02\x12T\n\x08\x63lusters\x18\x04 \x03(\x0b\x32=.google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntryB\x03\xe0\x41\x02\x1aR\n\rClustersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x30\n\x05value\x18\x02 
\x01(\x0b\x32!.google.bigtable.admin.v2.Cluster:\x02\x38\x01"L\n\x12GetInstanceRequest\x12\x36\n\x04name\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance"o\n\x14ListInstancesRequest\x12\x43\n\x06parent\x18\x01 \x01(\tB3\xe0\x41\x02\xfa\x41-\n+cloudresourcemanager.googleapis.com/Project\x12\x12\n\npage_token\x18\x02 \x01(\t"\x81\x01\n\x15ListInstancesResponse\x12\x35\n\tinstances\x18\x01 \x03(\x0b\x32".google.bigtable.admin.v2.Instance\x12\x18\n\x10\x66\x61iled_locations\x18\x02 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t"\x8f\x01\n\x1cPartialUpdateInstanceRequest\x12\x39\n\x08instance\x18\x01 \x01(\x0b\x32".google.bigtable.admin.v2.InstanceB\x03\xe0\x41\x02\x12\x34\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02"O\n\x15\x44\x65leteInstanceRequest\x12\x36\n\x04name\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance"\xa2\x01\n\x14\x43reateClusterRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x17\n\ncluster_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x37\n\x07\x63luster\x18\x03 \x01(\x0b\x32!.google.bigtable.admin.v2.ClusterB\x03\xe0\x41\x02"J\n\x11GetClusterRequest\x12\x35\n\x04name\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster"c\n\x13ListClustersRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x12\n\npage_token\x18\x02 \x01(\t"~\n\x14ListClustersResponse\x12\x33\n\x08\x63lusters\x18\x01 \x03(\x0b\x32!.google.bigtable.admin.v2.Cluster\x12\x18\n\x10\x66\x61iled_locations\x18\x02 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t"M\n\x14\x44\x65leteClusterRequest\x12\x35\n\x04name\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster"\xc6\x01\n\x16\x43reateInstanceMetadata\x12I\n\x10original_request\x18\x01 \x01(\x0b\x32/.google.bigtable.admin.v2.CreateInstanceRequest\x12\x30\n\x0crequest_time\x18\x02 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xcd\x01\n\x16UpdateInstanceMetadata\x12P\n\x10original_request\x18\x01 \x01(\x0b\x32\x36.google.bigtable.admin.v2.PartialUpdateInstanceRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xc4\x01\n\x15\x43reateClusterMetadata\x12H\n\x10original_request\x18\x01 \x01(\x0b\x32..google.bigtable.admin.v2.CreateClusterRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xb7\x01\n\x15UpdateClusterMetadata\x12;\n\x10original_request\x18\x01 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xc9\x01\n\x17\x43reateAppProfileRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x1b\n\x0e\x61pp_profile_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12>\n\x0b\x61pp_profile\x18\x03 \x01(\x0b\x32$.google.bigtable.admin.v2.AppProfileB\x03\xe0\x41\x02\x12\x17\n\x0fignore_warnings\x18\x04 \x01(\x08"P\n\x14GetAppProfileRequest\x12\x38\n\x04name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n"bigtable.googleapis.com/AppProfile"y\n\x16ListAppProfilesRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x11\n\tpage_size\x18\x03 \x01(\x05\x12\x12\n\npage_token\x18\x02 \x01(\t"\x88\x01\n\x17ListAppProfilesResponse\x12:\n\x0c\x61pp_profiles\x18\x01 \x03(\x0b\x32$.google.bigtable.admin.v2.AppProfile\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\x12\x18\n\x10\x66\x61iled_locations\x18\x03 \x03(\t"\xa8\x01\n\x17UpdateAppProfileRequest\x12>\n\x0b\x61pp_profile\x18\x01 
\x01(\x0b\x32$.google.bigtable.admin.v2.AppProfileB\x03\xe0\x41\x02\x12\x34\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02\x12\x17\n\x0fignore_warnings\x18\x03 \x01(\x08"q\n\x17\x44\x65leteAppProfileRequest\x12\x38\n\x04name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n"bigtable.googleapis.com/AppProfile\x12\x1c\n\x0fignore_warnings\x18\x02 \x01(\x08\x42\x03\xe0\x41\x02"\x1a\n\x18UpdateAppProfileMetadata2\x92\x1e\n\x15\x42igtableInstanceAdmin\x12\xda\x01\n\x0e\x43reateInstance\x12/.google.bigtable.admin.v2.CreateInstanceRequest\x1a\x1d.google.longrunning.Operation"x\x82\xd3\xe4\x93\x02&"!/v2/{parent=projects/*}/instances:\x01*\xda\x41$parent,instance_id,instance,clusters\xca\x41"\n\x08Instance\x12\x16\x43reateInstanceMetadata\x12\x91\x01\n\x0bGetInstance\x12,.google.bigtable.admin.v2.GetInstanceRequest\x1a".google.bigtable.admin.v2.Instance"0\x82\xd3\xe4\x93\x02#\x12!/v2/{name=projects/*/instances/*}\xda\x41\x04name\x12\xa4\x01\n\rListInstances\x12..google.bigtable.admin.v2.ListInstancesRequest\x1a/.google.bigtable.admin.v2.ListInstancesResponse"2\x82\xd3\xe4\x93\x02#\x12!/v2/{parent=projects/*}/instances\xda\x41\x06parent\x12\x86\x01\n\x0eUpdateInstance\x12".google.bigtable.admin.v2.Instance\x1a".google.bigtable.admin.v2.Instance",\x82\xd3\xe4\x93\x02&\x1a!/v2/{name=projects/*/instances/*}:\x01*\x12\xe8\x01\n\x15PartialUpdateInstance\x12\x36.google.bigtable.admin.v2.PartialUpdateInstanceRequest\x1a\x1d.google.longrunning.Operation"x\x82\xd3\xe4\x93\x02\x36\x32*/v2/{instance.name=projects/*/instances/*}:\x08instance\xda\x41\x14instance,update_mask\xca\x41"\n\x08Instance\x12\x16UpdateInstanceMetadata\x12\x8b\x01\n\x0e\x44\x65leteInstance\x12/.google.bigtable.admin.v2.DeleteInstanceRequest\x1a\x16.google.protobuf.Empty"0\x82\xd3\xe4\x93\x02#*!/v2/{name=projects/*/instances/*}\xda\x41\x04name\x12\xdc\x01\n\rCreateCluster\x12..google.bigtable.admin.v2.CreateClusterRequest\x1a\x1d.google.longrunning.Operation"|\x82\xd3\xe4\x93\x02\x37",/
v2/{parent=projects/*/instances/*}/clusters:\x07\x63luster\xda\x41\x19parent,cluster_id,cluster\xca\x41 \n\x07\x43luster\x12\x15\x43reateClusterMetadata\x12\x99\x01\n\nGetCluster\x12+.google.bigtable.admin.v2.GetClusterRequest\x1a!.google.bigtable.admin.v2.Cluster";\x82\xd3\xe4\x93\x02.\x12,/v2/{name=projects/*/instances/*/clusters/*}\xda\x41\x04name\x12\xac\x01\n\x0cListClusters\x12-.google.bigtable.admin.v2.ListClustersRequest\x1a..google.bigtable.admin.v2.ListClustersResponse"=\x82\xd3\xe4\x93\x02.\x12,/v2/{parent=projects/*/instances/*}/clusters\xda\x41\x06parent\x12\xad\x01\n\rUpdateCluster\x12!.google.bigtable.admin.v2.Cluster\x1a\x1d.google.longrunning.Operation"Z\x82\xd3\xe4\x93\x02\x31\x1a,/v2/{name=projects/*/instances/*/clusters/*}:\x01*\xca\x41 \n\x07\x43luster\x12\x15UpdateClusterMetadata\x12\x94\x01\n\rDeleteCluster\x12..google.bigtable.admin.v2.DeleteClusterRequest\x1a\x16.google.protobuf.Empty";\x82\xd3\xe4\x93\x02.*,/v2/{name=projects/*/instances/*/clusters/*}\xda\x41\x04name\x12\xd5\x01\n\x10\x43reateAppProfile\x12\x31.google.bigtable.admin.v2.CreateAppProfileRequest\x1a$.google.bigtable.admin.v2.AppProfile"h\x82\xd3\xe4\x93\x02>"//v2/{parent=projects/*/instances/*}/appProfiles:\x0b\x61pp_profile\xda\x41!parent,app_profile_id,app_profile\x12\xa5\x01\n\rGetAppProfile\x12..google.bigtable.admin.v2.GetAppProfileRequest\x1a$.google.bigtable.admin.v2.AppProfile">\x82\xd3\xe4\x93\x02\x31\x12//v2/{name=projects/*/instances/*/appProfiles/*}\xda\x41\x04name\x12\xb8\x01\n\x0fListAppProfiles\x12\x30.google.bigtable.admin.v2.ListAppProfilesRequest\x1a\x31.google.bigtable.admin.v2.ListAppProfilesResponse"@\x82\xd3\xe4\x93\x02\x31\x12//v2/{parent=projects/*/instances/*}/appProfiles\xda\x41\x06parent\x12\xfa\x01\n\x10UpdateAppProfile\x12\x31.google.bigtable.admin.v2.UpdateAppProfileRequest\x1a\x1d.google.longrunning.Operation"\x93\x01\x82\xd3\xe4\x93\x02J2;/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}:\x0b\x61pp_profile\xda\x41\x17\x61pp_profile,upd
ate_mask\xca\x41&\n\nAppProfile\x12\x18UpdateAppProfileMetadata\x12\x9d\x01\n\x10\x44\x65leteAppProfile\x12\x31.google.bigtable.admin.v2.DeleteAppProfileRequest\x1a\x16.google.protobuf.Empty">\x82\xd3\xe4\x93\x02\x31*//v2/{name=projects/*/instances/*/appProfiles/*}\xda\x41\x04name\x12\x93\x01\n\x0cGetIamPolicy\x12".google.iam.v1.GetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"H\x82\xd3\xe4\x93\x02\x37"2/v2/{resource=projects/*/instances/*}:getIamPolicy:\x01*\xda\x41\x08resource\x12\x9a\x01\n\x0cSetIamPolicy\x12".google.iam.v1.SetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"O\x82\xd3\xe4\x93\x02\x37"2/v2/{resource=projects/*/instances/*}:setIamPolicy:\x01*\xda\x41\x0fresource,policy\x12\xc5\x01\n\x12TestIamPermissions\x12(.google.iam.v1.TestIamPermissionsRequest\x1a).google.iam.v1.TestIamPermissionsResponse"Z\x82\xd3\xe4\x93\x02="8/v2/{resource=projects/*/instances/*}:testIamPermissions:\x01*\xda\x41\x14resource,permissions\x1a\x9a\x03\xca\x41\x1c\x62igtableadmin.googleapis.com\xd2\x41\xf7\x02https://www.googleapis.com/auth/bigtable.admin,https://www.googleapis.com/auth/bigtable.admin.cluster,https://www.googleapis.com/auth/bigtable.admin.instance,https://www.googleapis.com/auth/cloud-bigtable.admin,https://www.googleapis.com/auth/cloud-bigtable.admin.cluster,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-onlyB\xe2\x01\n\x1c\x63om.google.bigtable.admin.v2B\x1a\x42igtableInstanceAdminProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2\xea\x02"Google::Cloud::Bigtable::Admin::V2b\x06proto3', dependencies=[ google_dot_api_dot_annotations__pb2.DESCRIPTOR, google_dot_api_dot_client__pb2.DESCRIPTOR, @@ -1524,7 +1524,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002", file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), @@ 
-1538,7 +1538,7 @@ extension_ranges=[], oneofs=[], serialized_start=3419, - serialized_end=3527, + serialized_end=3532, ) @@ -1558,8 +1558,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3529, - serialized_end=3555, + serialized_start=3534, + serialized_end=3560, ) _CREATEINSTANCEREQUEST_CLUSTERSENTRY.fields_by_name[ @@ -2177,7 +2177,8 @@ Values are of the form ``projects/{project}/instances/{instanc e}/appProfiles/{app_profile}``. ignore_warnings: - If true, ignore safety checks when deleting the app profile. + Required. If true, ignore safety checks when deleting the app + profile. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteAppProfileRequest) }, @@ -2222,6 +2223,7 @@ _UPDATEAPPPROFILEREQUEST.fields_by_name["app_profile"]._options = None _UPDATEAPPPROFILEREQUEST.fields_by_name["update_mask"]._options = None _DELETEAPPPROFILEREQUEST.fields_by_name["name"]._options = None +_DELETEAPPPROFILEREQUEST.fields_by_name["ignore_warnings"]._options = None _BIGTABLEINSTANCEADMIN = _descriptor.ServiceDescriptor( name="BigtableInstanceAdmin", @@ -2230,8 +2232,8 @@ index=0, serialized_options=b"\312A\034bigtableadmin.googleapis.com\322A\367\002https://www.googleapis.com/auth/bigtable.admin,https://www.googleapis.com/auth/bigtable.admin.cluster,https://www.googleapis.com/auth/bigtable.admin.instance,https://www.googleapis.com/auth/cloud-bigtable.admin,https://www.googleapis.com/auth/cloud-bigtable.admin.cluster,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-only", create_key=_descriptor._internal_create_key, - serialized_start=3558, - serialized_end=7416, + serialized_start=3563, + serialized_end=7421, methods=[ _descriptor.MethodDescriptor( name="CreateInstance", diff --git a/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto b/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto index 6f434a473..d979dba59 100644 --- 
a/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto +++ b/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto @@ -72,8 +72,7 @@ service BigtableTableAdmin { // feature might be changed in backward-incompatible ways and is not // recommended for production use. It is not subject to any SLA or deprecation // policy. - rpc CreateTableFromSnapshot(CreateTableFromSnapshotRequest) - returns (google.longrunning.Operation) { + rpc CreateTableFromSnapshot(CreateTableFromSnapshotRequest) returns (google.longrunning.Operation) { option (google.api.http) = { post: "/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot" body: "*" @@ -135,8 +134,7 @@ service BigtableTableAdmin { // CheckConsistency to check whether mutations to the table that finished // before this call started have been replicated. The tokens will be available // for 90 days. - rpc GenerateConsistencyToken(GenerateConsistencyTokenRequest) - returns (GenerateConsistencyTokenResponse) { + rpc GenerateConsistencyToken(GenerateConsistencyTokenRequest) returns (GenerateConsistencyTokenResponse) { option (google.api.http) = { post: "/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken" body: "*" @@ -147,8 +145,7 @@ service BigtableTableAdmin { // Checks replication consistency based on a consistency token, that is, if // replication has caught up based on the conditions specified in the token // and the check request. - rpc CheckConsistency(CheckConsistencyRequest) - returns (CheckConsistencyResponse) { + rpc CheckConsistency(CheckConsistencyRequest) returns (CheckConsistencyResponse) { option (google.api.http) = { post: "/v2/{name=projects/*/instances/*/tables/*}:checkConsistency" body: "*" @@ -164,14 +161,12 @@ service BigtableTableAdmin { // feature might be changed in backward-incompatible ways and is not // recommended for production use. It is not subject to any SLA or deprecation // policy. 
- rpc SnapshotTable(SnapshotTableRequest) - returns (google.longrunning.Operation) { + rpc SnapshotTable(SnapshotTableRequest) returns (google.longrunning.Operation) { option (google.api.http) = { post: "/v2/{name=projects/*/instances/*/tables/*}:snapshot" body: "*" }; - option (google.api.method_signature) = - "name,cluster,snapshot_id,description"; + option (google.api.method_signature) = "name,cluster,snapshot_id,description"; option (google.longrunning.operation_info) = { response_type: "Snapshot" metadata_type: "SnapshotTableMetadata" @@ -220,24 +215,24 @@ service BigtableTableAdmin { option (google.api.method_signature) = "name"; } - // Starts creating a new Cloud Bigtable Backup. The returned backup + // Starts creating a new Cloud Bigtable Backup. The returned backup // [long-running operation][google.longrunning.Operation] can be used to // track creation of the backup. The // [metadata][google.longrunning.Operation.metadata] field type is // [CreateBackupMetadata][google.bigtable.admin.v2.CreateBackupMetadata]. The // [response][google.longrunning.Operation.response] field type is - // [Backup][google.bigtable.admin.v2.Backup], if successful. Cancelling the - // returned operation will stop the creation and delete the backup. + // [Backup][google.bigtable.admin.v2.Backup], if successful. Cancelling the returned operation will stop the + // creation and delete the backup. rpc CreateBackup(CreateBackupRequest) returns (google.longrunning.Operation) { option (google.api.http) = { post: "/v2/{parent=projects/*/instances/*/clusters/*}/backups" body: "backup" }; + option (google.api.method_signature) = "parent,backup_id,backup"; option (google.longrunning.operation_info) = { response_type: "Backup" metadata_type: "CreateBackupMetadata" }; - option (google.api.method_signature) = "parent,backup_id,backup"; } // Gets metadata on a pending or completed Cloud Bigtable Backup. 
@@ -275,11 +270,11 @@ service BigtableTableAdmin { } // Create a new table by restoring from a completed backup. The new table - // must be in the same instance as the instance containing the backup. The + // must be in the same instance as the instance containing the backup. The // returned table [long-running operation][google.longrunning.Operation] can - // be used to track the progress of the operation, and to cancel it. The + // be used to track the progress of the operation, and to cancel it. The // [metadata][google.longrunning.Operation.metadata] field type is - // [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. The + // [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. The // [response][google.longrunning.Operation.response] type is // [Table][google.bigtable.admin.v2.Table], if successful. rpc RestoreTable(RestoreTableRequest) returns (google.longrunning.Operation) { @@ -293,22 +288,24 @@ service BigtableTableAdmin { }; } - // Gets the access control policy for a resource. + // Gets the access control policy for a Table or Backup resource. // Returns an empty policy if the resource exists but does not have a policy // set. - rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest) - returns (google.iam.v1.Policy) { + rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest) returns (google.iam.v1.Policy) { option (google.api.http) = { post: "/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy" body: "*" + additional_bindings { + post: "/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:getIamPolicy" + body: "*" + } }; option (google.api.method_signature) = "resource"; } // Sets the access control policy on a Table or Backup resource. // Replaces any existing policy. 
- rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest) - returns (google.iam.v1.Policy) { + rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest) returns (google.iam.v1.Policy) { option (google.api.http) = { post: "/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy" body: "*" @@ -320,9 +317,8 @@ service BigtableTableAdmin { option (google.api.method_signature) = "resource,policy"; } - // Returns permissions that the caller has on the specified table resource. - rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest) - returns (google.iam.v1.TestIamPermissionsResponse) { + // Returns permissions that the caller has on the specified Table or Backup resource. + rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest) returns (google.iam.v1.TestIamPermissionsResponse) { option (google.api.http) = { post: "/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions" body: "*" @@ -335,6 +331,78 @@ service BigtableTableAdmin { } } +// The request for +// [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. +message RestoreTableRequest { + // Required. The name of the instance in which to create the restored + // table. This instance must be the parent of the source backup. Values are + // of the form `projects//instances/`. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Instance" + } + ]; + + // Required. The id of the table to create and restore to. This + // table must not already exist. The `table_id` appended to + // `parent` forms the full table name of the form + // `projects//instances//tables/`. + string table_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The source from which to restore. + oneof source { + // Name of the backup from which to restore. Values are of the form + // `projects//instances//clusters//backups/`. 
+ string backup = 3 [(google.api.resource_reference) = { + type: "bigtable.googleapis.com/Backup" + }]; + } +} + +// Metadata type for the long-running operation returned by +// [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. +message RestoreTableMetadata { + // Name of the table being created and restored to. + string name = 1; + + // The type of the restore source. + RestoreSourceType source_type = 2; + + // Information about the source used to restore the table, as specified by + // `source` in [RestoreTableRequest][google.bigtable.admin.v2.RestoreTableRequest]. + oneof source_info { + BackupInfo backup_info = 3; + } + + // If exists, the name of the long-running operation that will be used to + // track the post-restore optimization process to optimize the performance of + // the restored table. The metadata type of the long-running operation is + // [OptimizeRestoreTableMetadata][]. The response type is + // [Empty][google.protobuf.Empty]. This long-running operation may be + // automatically created by the system if applicable after the + // RestoreTable long-running operation completes successfully. This operation + // may not be created if the table is already optimized or the restore was + // not successful. + string optimize_table_operation_name = 4; + + // The progress of the [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable] + // operation. + OperationProgress progress = 5; +} + +// Metadata type for the long-running operation used to track the progress +// of optimizations performed on a newly restored table. This long-running +// operation is automatically created by the system after the successful +// completion of a table restore, and cannot be cancelled. +message OptimizeRestoredTableMetadata { + // Name of the restored table being optimized. + string name = 1; + + // The progress of the post-restore optimizations. 
+ OperationProgress progress = 2; +} + // Request message for // [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] message CreateTableRequest { @@ -353,8 +421,8 @@ message CreateTableRequest { } ]; - // Required. The name by which the new table should be referred to within the - // parent instance, e.g., `foobar` rather than `{parent}/tables/foobar`. + // Required. The name by which the new table should be referred to within the parent + // instance, e.g., `foobar` rather than `{parent}/tables/foobar`. // Maximum 50 characters. string table_id = 2 [(google.api.field_behavior) = REQUIRED]; @@ -397,13 +465,13 @@ message CreateTableFromSnapshotRequest { } ]; - // Required. The name by which the new table should be referred to within the - // parent instance, e.g., `foobar` rather than `{parent}/tables/foobar`. + // Required. The name by which the new table should be referred to within the parent + // instance, e.g., `foobar` rather than `{parent}/tables/foobar`. string table_id = 2 [(google.api.field_behavior) = REQUIRED]; - // Required. The unique name of the snapshot from which to restore the table. - // The snapshot and the table must be in the same instance. Values are of the - // form + // Required. The unique name of the snapshot from which to restore the table. The + // snapshot and the table must be in the same instance. + // Values are of the form // `projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}`. string source_snapshot = 3 [ (google.api.field_behavior) = REQUIRED, @@ -421,7 +489,9 @@ message DropRowRangeRequest { // `projects/{project}/instances/{instance}/tables/{table}`. string name = 1 [ (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "bigtable.googleapis.com/Table" } + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Table" + } ]; // Delete all rows or by prefix. 
@@ -438,8 +508,8 @@ message DropRowRangeRequest { // Request message for // [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] message ListTablesRequest { - // Required. The unique name of the instance for which tables should be - // listed. Values are of the form `projects/{project}/instances/{instance}`. + // Required. The unique name of the instance for which tables should be listed. + // Values are of the form `projects/{project}/instances/{instance}`. string parent = 1 [ (google.api.field_behavior) = REQUIRED, (google.api.resource_reference) = { @@ -486,7 +556,9 @@ message GetTableRequest { // `projects/{project}/instances/{instance}/tables/{table}`. string name = 1 [ (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "bigtable.googleapis.com/Table" } + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Table" + } ]; // The view to be applied to the returned table's fields. @@ -502,7 +574,9 @@ message DeleteTableRequest { // `projects/{project}/instances/{instance}/tables/{table}`. string name = 1 [ (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "bigtable.googleapis.com/Table" } + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Table" + } ]; } @@ -535,26 +609,29 @@ message ModifyColumnFamiliesRequest { // `projects/{project}/instances/{instance}/tables/{table}`. string name = 1 [ (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "bigtable.googleapis.com/Table" } + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Table" + } ]; - // Required. Modifications to be atomically applied to the specified table's - // families. Entries are applied in order, meaning that earlier modifications - // can be masked by later ones (in the case of repeated updates to the same - // family, for example). 
- repeated Modification modifications = 2 - [(google.api.field_behavior) = REQUIRED]; + // Required. Modifications to be atomically applied to the specified table's families. + // Entries are applied in order, meaning that earlier modifications can be + // masked by later ones (in the case of repeated updates to the same family, + // for example). + repeated Modification modifications = 2 [(google.api.field_behavior) = REQUIRED]; } // Request message for // [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] message GenerateConsistencyTokenRequest { - // Required. The unique name of the Table for which to create a consistency - // token. Values are of the form + // Required. The unique name of the Table for which to create a consistency token. + // Values are of the form // `projects/{project}/instances/{instance}/tables/{table}`. string name = 1 [ (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "bigtable.googleapis.com/Table" } + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Table" + } ]; } @@ -568,12 +645,14 @@ message GenerateConsistencyTokenResponse { // Request message for // [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] message CheckConsistencyRequest { - // Required. The unique name of the Table for which to check replication - // consistency. Values are of the form + // Required. The unique name of the Table for which to check replication consistency. + // Values are of the form // `projects/{project}/instances/{instance}/tables/{table}`. string name = 1 [ (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "bigtable.googleapis.com/Table" } + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Table" + } ]; // Required. The token created using GenerateConsistencyToken for the Table. 
@@ -601,7 +680,9 @@ message SnapshotTableRequest { // `projects/{project}/instances/{instance}/tables/{table}`. string name = 1 [ (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "bigtable.googleapis.com/Table" } + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Table" + } ]; // Required. The name of the cluster where the snapshot will be created in. @@ -614,9 +695,9 @@ message SnapshotTableRequest { } ]; - // Required. The ID by which the new snapshot should be referred to within the - // parent cluster, e.g., `mysnapshot` of the form: - // `[_a-zA-Z0-9][-_.a-zA-Z0-9]*` rather than + // Required. The ID by which the new snapshot should be referred to within the parent + // cluster, e.g., `mysnapshot` of the form: `[_a-zA-Z0-9][-_.a-zA-Z0-9]*` + // rather than // `projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/mysnapshot`. string snapshot_id = 3 [(google.api.field_behavior) = REQUIRED]; @@ -657,8 +738,8 @@ message GetSnapshotRequest { // feature might be changed in backward-incompatible ways and is not recommended // for production use. It is not subject to any SLA or deprecation policy. message ListSnapshotsRequest { - // Required. The unique name of the cluster for which snapshots should be - // listed. Values are of the form + // Required. The unique name of the cluster for which snapshots should be listed. + // Values are of the form // `projects/{project}/instances/{instance}/clusters/{cluster}`. // Use `{cluster} = '-'` to list snapshots for all clusters in an instance, // e.g., `projects/{project}/instances/{instance}/clusters/-`. @@ -748,8 +829,7 @@ message CreateTableFromSnapshotMetadata { google.protobuf.Timestamp finish_time = 3; } -// The request for -// [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]. +// The request for [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]. message CreateBackupRequest { // Required. 
This must be one of the clusters in the instance in which this // table is located. The backup will be stored in this cluster. Values are @@ -789,20 +869,7 @@ message CreateBackupMetadata { google.protobuf.Timestamp end_time = 4; } -// The request for -// [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. -message GetBackupRequest { - // Required. Name of the backup. - // Values are of the form - // `projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}`. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "bigtable.googleapis.com/Backup" } - ]; -} - -// The request for -// [UpdateBackup][google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup]. +// The request for [UpdateBackup][google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup]. message UpdateBackupRequest { // Required. The backup to update. `backup.name`, and the fields to be updated // as specified by `update_mask` are required. Other fields are ignored. @@ -815,26 +882,38 @@ message UpdateBackupRequest { // resource, not to the request message. The field mask must always be // specified; this prevents any future fields from being erased accidentally // by clients that do not know about them. - google.protobuf.FieldMask update_mask = 2 - [(google.api.field_behavior) = REQUIRED]; + google.protobuf.FieldMask update_mask = 2 [(google.api.field_behavior) = REQUIRED]; } -// The request for -// [DeleteBackup][google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup]. +// The request for [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. +message GetBackupRequest { + // Required. Name of the backup. + // Values are of the form + // `projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}`. 
+ string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Backup" + } + ]; +} + +// The request for [DeleteBackup][google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup]. message DeleteBackupRequest { // Required. Name of the backup to delete. // Values are of the form // `projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}`. string name = 1 [ (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "bigtable.googleapis.com/Backup" } + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Backup" + } ]; } -// The request for -// [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. +// The request for [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. message ListBackupsRequest { - // Required. The cluster to list backups from. Values are of the + // Required. The cluster to list backups from. Values are of the // form `projects/{project}/instances/{instance}/clusters/{cluster}`. // Use `{cluster} = '-'` to list backups for all clusters in an instance, // e.g., `projects/{project}/instances/{instance}/clusters/-`. @@ -849,7 +928,7 @@ message ListBackupsRequest { // The expression must specify the field name, a comparison operator, // and the value that you want to use for filtering. The value must be a // string, a number, or a boolean. The comparison operator must be - // <, >, <=, >=, !=, =, or :. Colon ‘:’ represents a HAS operator which is + // <, >, <=, >=, !=, =, or :. Colon ':' represents a HAS operator which is // roughly synonymous with equality. Filter rules are case insensitive. // // The fields eligible for filtering are: @@ -880,9 +959,8 @@ message ListBackupsRequest { string filter = 2; // An expression for specifying the sort order of the results of the request. - // The string value should specify one or more fields in - // [Backup][google.bigtable.admin.v2.Backup]. 
The full syntax is described at - // https://aip.dev/132#ordering. + // The string value should specify one or more fields in [Backup][google.bigtable.admin.v2.Backup]. The full + // syntax is described at https://aip.dev/132#ordering. // // Fields supported are: // * name @@ -907,88 +985,19 @@ message ListBackupsRequest { int32 page_size = 4; // If non-empty, `page_token` should contain a - // [next_page_token][google.bigtable.admin.v2.ListBackupsResponse.next_page_token] - // from a previous - // [ListBackupsResponse][google.bigtable.admin.v2.ListBackupsResponse] to the - // same `parent` and with the same `filter`. + // [next_page_token][google.bigtable.admin.v2.ListBackupsResponse.next_page_token] from a + // previous [ListBackupsResponse][google.bigtable.admin.v2.ListBackupsResponse] to the same `parent` and with the same + // `filter`. string page_token = 5; } -// The response for -// [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. +// The response for [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. message ListBackupsResponse { // The list of matching backups. repeated Backup backups = 1; // `next_page_token` can be sent in a subsequent - // [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups] call - // to fetch more of the matching backups. + // [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups] call to fetch more + // of the matching backups. string next_page_token = 2; } - -// The request for -// [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. -message RestoreTableRequest { - // Required. The name of the instance in which to create the restored - // table. This instance must be the parent of the source backup. Values are - // of the form `projects//instances/`. - string parent = 1; - - // Required. The id of the table to create and restore to. This - // table must not already exist. 
The `table_id` appended to - // `parent` forms the full table name of the form - // `projects//instances//tables/`. - string table_id = 2; - - // Required. The source from which to restore. - oneof source { - // Name of the backup from which to restore. Values are of the form - // `projects//instances//clusters//backups/`. - string backup = 3; - } -} - -// Metadata type for the long-running operation returned by -// [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. -message RestoreTableMetadata { - // Name of the table being created and restored to. - string name = 1; - - // The type of the restore source. - RestoreSourceType source_type = 2; - - // Information about the source used to restore the table, as specified by - // `source` in - // [RestoreTableRequest][google.bigtable.admin.v2.RestoreTableRequest]. - oneof source_info { - BackupInfo backup_info = 3; - } - - // If exists, the name of the long-running operation that will be used to - // track the post-restore optimization process to optimize the performance of - // the restored table. The metadata type of the long-running operation is - // [OptimizeRestoreTableMetadata][]. The response type is - // [Empty][google.protobuf.Empty]. This long-running operation may be - // automatically created by the system if applicable after the - // RestoreTable long-running operation completes successfully. This operation - // may not be created if the table is already optimized or the restore was - // not successful. - string optimize_table_operation_name = 4; - - // The progress of the - // [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable] - // operation. - OperationProgress progress = 5; -} - -// Metadata type for the long-running operation used to track the progress -// of optimizations performed on a newly restored table. This long-running -// operation is automatically created by the system after the successful -// completion of a table restore, and cannot be cancelled. 
-message OptimizeRestoredTableMetadata { - // Name of the restored table being optimized. - string name = 1; - - // The progress of the post-restore optimizations. - OperationProgress progress = 2; -} diff --git a/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py b/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py index 5ca167d87..2e2b6f7d9 100644 --- a/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py +++ b/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py @@ -39,7 +39,7 @@ syntax="proto3", serialized_options=b'\n\034com.google.bigtable.admin.v2B\027BigtableTableAdminProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2\352\002"Google::Cloud::Bigtable::Admin::V2', create_key=_descriptor._internal_create_key, - serialized_pb=b'\n?google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x31google/cloud/bigtable_admin_v2/proto/common.proto\x1a\x30google/cloud/bigtable_admin_v2/proto/table.proto\x1a\x1egoogle/iam/v1/iam_policy.proto\x1a\x1agoogle/iam/v1/policy.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xfc\x01\n\x12\x43reateTableRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x15\n\x08table_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x33\n\x05table\x18\x03 \x01(\x0b\x32\x1f.google.bigtable.admin.v2.TableB\x03\xe0\x41\x02\x12J\n\x0einitial_splits\x18\x04 \x03(\x0b\x32\x32.google.bigtable.admin.v2.CreateTableRequest.Split\x1a\x14\n\x05Split\x12\x0b\n\x03key\x18\x01 
\x01(\x0c"\xb4\x01\n\x1e\x43reateTableFromSnapshotRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x15\n\x08table_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x41\n\x0fsource_snapshot\x18\x03 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Snapshot"\x94\x01\n\x13\x44ropRowRangeRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x18\n\x0erow_key_prefix\x18\x02 \x01(\x0cH\x00\x12$\n\x1a\x64\x65lete_all_data_from_table\x18\x03 \x01(\x08H\x00\x42\x08\n\x06target"\xa8\x01\n\x11ListTablesRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t"^\n\x12ListTablesResponse\x12/\n\x06tables\x18\x01 \x03(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"z\n\x0fGetTableRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View"I\n\x12\x44\x65leteTableRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table"\xda\x02\n\x1bModifyColumnFamiliesRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12^\n\rmodifications\x18\x02 \x03(\x0b\x32\x42.google.bigtable.admin.v2.ModifyColumnFamiliesRequest.ModificationB\x03\xe0\x41\x02\x1a\xa5\x01\n\x0cModification\x12\n\n\x02id\x18\x01 \x01(\t\x12\x38\n\x06\x63reate\x18\x02 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x38\n\x06update\x18\x03 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x0e\n\x04\x64rop\x18\x04 \x01(\x08H\x00\x42\x05\n\x03mod"V\n\x1fGenerateConsistencyTokenRequest\x12\x33\n\x04name\x18\x01 
\x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table"=\n GenerateConsistencyTokenResponse\x12\x19\n\x11\x63onsistency_token\x18\x01 \x01(\t"n\n\x17\x43heckConsistencyRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x1e\n\x11\x63onsistency_token\x18\x02 \x01(\tB\x03\xe0\x41\x02".\n\x18\x43heckConsistencyResponse\x12\x12\n\nconsistent\x18\x01 \x01(\x08"\xdc\x01\n\x14SnapshotTableRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x38\n\x07\x63luster\x18\x02 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster\x12\x18\n\x0bsnapshot_id\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12&\n\x03ttl\x18\x04 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x13\n\x0b\x64\x65scription\x18\x05 \x01(\t"L\n\x12GetSnapshotRequest\x12\x36\n\x04name\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Snapshot"v\n\x14ListSnapshotsRequest\x12\x37\n\x06parent\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t"g\n\x15ListSnapshotsResponse\x12\x35\n\tsnapshots\x18\x01 \x03(\x0b\x32".google.bigtable.admin.v2.Snapshot\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"O\n\x15\x44\x65leteSnapshotRequest\x12\x36\n\x04name\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Snapshot"\xc4\x01\n\x15SnapshotTableMetadata\x12H\n\x10original_request\x18\x01 \x01(\x0b\x32..google.bigtable.admin.v2.SnapshotTableRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xd8\x01\n\x1f\x43reateTableFromSnapshotMetadata\x12R\n\x10original_request\x18\x01 \x01(\x0b\x32\x38.google.bigtable.admin.v2.CreateTableFromSnapshotRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp"\x9d\x01\n\x13\x43reateBackupRequest\x12\x37\n\x06parent\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster\x12\x16\n\tbackup_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x35\n\x06\x62\x61\x63kup\x18\x03 \x01(\x0b\x32 .google.bigtable.admin.v2.BackupB\x03\xe0\x41\x02"\x98\x01\n\x14\x43reateBackupMetadata\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0csource_table\x18\x02 \x01(\t\x12.\n\nstart_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"H\n\x10GetBackupRequest\x12\x34\n\x04name\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1e\x62igtable.googleapis.com/Backup"\x82\x01\n\x13UpdateBackupRequest\x12\x35\n\x06\x62\x61\x63kup\x18\x01 \x01(\x0b\x32 .google.bigtable.admin.v2.BackupB\x03\xe0\x41\x02\x12\x34\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02"K\n\x13\x44\x65leteBackupRequest\x12\x34\n\x04name\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1e\x62igtable.googleapis.com/Backup"\x96\x01\n\x12ListBackupsRequest\x12\x37\n\x06parent\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster\x12\x0e\n\x06\x66ilter\x18\x02 \x01(\t\x12\x10\n\x08order_by\x18\x03 \x01(\t\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x05 \x01(\t"a\n\x13ListBackupsResponse\x12\x31\n\x07\x62\x61\x63kups\x18\x01 \x03(\x0b\x32 .google.bigtable.admin.v2.Backup\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"S\n\x13RestoreTableRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x10\n\x08table_id\x18\x02 \x01(\t\x12\x10\n\x06\x62\x61\x63kup\x18\x03 \x01(\tH\x00\x42\x08\n\x06source"\x98\x02\n\x14RestoreTableMetadata\x12\x0c\n\x04name\x18\x01 \x01(\t\x12@\n\x0bsource_type\x18\x02 \x01(\x0e\x32+.google.bigtable.admin.v2.RestoreSourceType\x12;\n\x0b\x62\x61\x63kup_info\x18\x03 \x01(\x0b\x32$.google.bigtable.admin.v2.BackupInfoH\x00\x12%\n\x1doptimize_table_operation_name\x18\x04 
\x01(\t\x12=\n\x08progress\x18\x05 \x01(\x0b\x32+.google.bigtable.admin.v2.OperationProgressB\r\n\x0bsource_info"l\n\x1dOptimizeRestoredTableMetadata\x12\x0c\n\x04name\x18\x01 \x01(\t\x12=\n\x08progress\x18\x02 \x01(\x0b\x32+.google.bigtable.admin.v2.OperationProgress2\xc8$\n\x12\x42igtableTableAdmin\x12\xab\x01\n\x0b\x43reateTable\x12,.google.bigtable.admin.v2.CreateTableRequest\x1a\x1f.google.bigtable.admin.v2.Table"M\x82\xd3\xe4\x93\x02/"*/v2/{parent=projects/*/instances/*}/tables:\x01*\xda\x41\x15parent,table_id,table\x12\x8a\x02\n\x17\x43reateTableFromSnapshot\x12\x38.google.bigtable.admin.v2.CreateTableFromSnapshotRequest\x1a\x1d.google.longrunning.Operation"\x95\x01\x82\xd3\xe4\x93\x02\x42"=/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot:\x01*\xda\x41\x1fparent,table_id,source_snapshot\xca\x41(\n\x05Table\x12\x1f\x43reateTableFromSnapshotMetadata\x12\xa4\x01\n\nListTables\x12+.google.bigtable.admin.v2.ListTablesRequest\x1a,.google.bigtable.admin.v2.ListTablesResponse";\x82\xd3\xe4\x93\x02,\x12*/v2/{parent=projects/*/instances/*}/tables\xda\x41\x06parent\x12\x91\x01\n\x08GetTable\x12).google.bigtable.admin.v2.GetTableRequest\x1a\x1f.google.bigtable.admin.v2.Table"9\x82\xd3\xe4\x93\x02,\x12*/v2/{name=projects/*/instances/*/tables/*}\xda\x41\x04name\x12\x8e\x01\n\x0b\x44\x65leteTable\x12,.google.bigtable.admin.v2.DeleteTableRequest\x1a\x16.google.protobuf.Empty"9\x82\xd3\xe4\x93\x02,**/v2/{name=projects/*/instances/*/tables/*}\xda\x41\x04name\x12\xcf\x01\n\x14ModifyColumnFamilies\x12\x35.google.bigtable.admin.v2.ModifyColumnFamiliesRequest\x1a\x1f.google.bigtable.admin.v2.Table"_\x82\xd3\xe4\x93\x02\x44"?/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies:\x01*\xda\x41\x12name,modifications\x12\x99\x01\n\x0c\x44ropRowRange\x12-.google.bigtable.admin.v2.DropRowRangeRequest\x1a\x16.google.protobuf.Empty"B\x82\xd3\xe4\x93\x02<"7/v2/{name=projects/*/instances/*/tables/*}:dropRowRange:\x01*\x12\xe8\x01\n\x18GenerateConsistencyToken\x12\x39.g
oogle.bigtable.admin.v2.GenerateConsistencyTokenRequest\x1a:.google.bigtable.admin.v2.GenerateConsistencyTokenResponse"U\x82\xd3\xe4\x93\x02H"C/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken:\x01*\xda\x41\x04name\x12\xda\x01\n\x10\x43heckConsistency\x12\x31.google.bigtable.admin.v2.CheckConsistencyRequest\x1a\x32.google.bigtable.admin.v2.CheckConsistencyResponse"_\x82\xd3\xe4\x93\x02@";/v2/{name=projects/*/instances/*/tables/*}:checkConsistency:\x01*\xda\x41\x16name,consistency_token\x12\xea\x01\n\rSnapshotTable\x12..google.bigtable.admin.v2.SnapshotTableRequest\x1a\x1d.google.longrunning.Operation"\x89\x01\x82\xd3\xe4\x93\x02\x38"3/v2/{name=projects/*/instances/*/tables/*}:snapshot:\x01*\xda\x41$name,cluster,snapshot_id,description\xca\x41!\n\x08Snapshot\x12\x15SnapshotTableMetadata\x12\xa8\x01\n\x0bGetSnapshot\x12,.google.bigtable.admin.v2.GetSnapshotRequest\x1a".google.bigtable.admin.v2.Snapshot"G\x82\xd3\xe4\x93\x02:\x12\x38/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}\xda\x41\x04name\x12\xbb\x01\n\rListSnapshots\x12..google.bigtable.admin.v2.ListSnapshotsRequest\x1a/.google.bigtable.admin.v2.ListSnapshotsResponse"I\x82\xd3\xe4\x93\x02:\x12\x38/v2/{parent=projects/*/instances/*/clusters/*}/snapshots\xda\x41\x06parent\x12\xa2\x01\n\x0e\x44\x65leteSnapshot\x12/.google.bigtable.admin.v2.DeleteSnapshotRequest\x1a\x16.google.protobuf.Empty"G\x82\xd3\xe4\x93\x02:*8/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}\xda\x41\x04name\x12\xe0\x01\n\x0c\x43reateBackup\x12-.google.bigtable.admin.v2.CreateBackupRequest\x1a\x1d.google.longrunning.Operation"\x81\x01\x82\xd3\xe4\x93\x02@"6/v2/{parent=projects/*/instances/*/clusters/*}/backups:\x06\x62\x61\x63kup\xca\x41\x1e\n\x06\x42\x61\x63kup\x12\x14\x43reateBackupMetadata\xda\x41\x17parent,backup_id,backup\x12\xa0\x01\n\tGetBackup\x12*.google.bigtable.admin.v2.GetBackupRequest\x1a 
.google.bigtable.admin.v2.Backup"E\x82\xd3\xe4\x93\x02\x38\x12\x36/v2/{name=projects/*/instances/*/clusters/*/backups/*}\xda\x41\x04name\x12\xc3\x01\n\x0cUpdateBackup\x12-.google.bigtable.admin.v2.UpdateBackupRequest\x1a .google.bigtable.admin.v2.Backup"b\x82\xd3\xe4\x93\x02G2=/v2/{backup.name=projects/*/instances/*/clusters/*/backups/*}:\x06\x62\x61\x63kup\xda\x41\x12\x62\x61\x63kup,update_mask\x12\x9c\x01\n\x0c\x44\x65leteBackup\x12-.google.bigtable.admin.v2.DeleteBackupRequest\x1a\x16.google.protobuf.Empty"E\x82\xd3\xe4\x93\x02\x38*6/v2/{name=projects/*/instances/*/clusters/*/backups/*}\xda\x41\x04name\x12\xb3\x01\n\x0bListBackups\x12,.google.bigtable.admin.v2.ListBackupsRequest\x1a-.google.bigtable.admin.v2.ListBackupsResponse"G\x82\xd3\xe4\x93\x02\x38\x12\x36/v2/{parent=projects/*/instances/*/clusters/*}/backups\xda\x41\x06parent\x12\xbb\x01\n\x0cRestoreTable\x12-.google.bigtable.admin.v2.RestoreTableRequest\x1a\x1d.google.longrunning.Operation"]\x82\xd3\xe4\x93\x02\x37"2/v2/{parent=projects/*/instances/*}/tables:restore:\x01*\xca\x41\x1d\n\x05Table\x12\x14RestoreTableMetadata\x12\x9c\x01\n\x0cGetIamPolicy\x12".google.iam.v1.GetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"Q\x82\xd3\xe4\x93\x02@";/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy:\x01*\xda\x41\x08resource\x12\xf3\x01\n\x0cSetIamPolicy\x12".google.iam.v1.SetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"\xa7\x01\x82\xd3\xe4\x93\x02\x8e\x01";/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy:\x01*ZL"G/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:setIamPolicy:\x01*\xda\x41\x0fresource,policy\x12\xa4\x02\n\x12TestIamPermissions\x12(.google.iam.v1.TestIamPermissionsRequest\x1a).google.iam.v1.TestIamPermissionsResponse"\xb8\x01\x82\xd3\xe4\x93\x02\x9a\x01"A/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions:\x01*ZR"M/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:testIamPermissions:\x01*\xda\x41\x14resource,permissions\x1a\xde\x02\xca\x41\x1c\x62
igtableadmin.googleapis.com\xd2\x41\xbb\x02https://www.googleapis.com/auth/bigtable.admin,https://www.googleapis.com/auth/bigtable.admin.table,https://www.googleapis.com/auth/cloud-bigtable.admin,https://www.googleapis.com/auth/cloud-bigtable.admin.table,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-onlyB\xdf\x01\n\x1c\x63om.google.bigtable.admin.v2B\x17\x42igtableTableAdminProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2\xea\x02"Google::Cloud::Bigtable::Admin::V2b\x06proto3', + serialized_pb=b'\n?google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x31google/cloud/bigtable_admin_v2/proto/common.proto\x1a\x30google/cloud/bigtable_admin_v2/proto/table.proto\x1a\x1egoogle/iam/v1/iam_policy.proto\x1a\x1agoogle/iam/v1/policy.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xa7\x01\n\x13RestoreTableRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x15\n\x08table_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x35\n\x06\x62\x61\x63kup\x18\x03 \x01(\tB#\xfa\x41 \n\x1e\x62igtable.googleapis.com/BackupH\x00\x42\x08\n\x06source"\x98\x02\n\x14RestoreTableMetadata\x12\x0c\n\x04name\x18\x01 \x01(\t\x12@\n\x0bsource_type\x18\x02 \x01(\x0e\x32+.google.bigtable.admin.v2.RestoreSourceType\x12;\n\x0b\x62\x61\x63kup_info\x18\x03 \x01(\x0b\x32$.google.bigtable.admin.v2.BackupInfoH\x00\x12%\n\x1doptimize_table_operation_name\x18\x04 \x01(\t\x12=\n\x08progress\x18\x05 
\x01(\x0b\x32+.google.bigtable.admin.v2.OperationProgressB\r\n\x0bsource_info"l\n\x1dOptimizeRestoredTableMetadata\x12\x0c\n\x04name\x18\x01 \x01(\t\x12=\n\x08progress\x18\x02 \x01(\x0b\x32+.google.bigtable.admin.v2.OperationProgress"\xfc\x01\n\x12\x43reateTableRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x15\n\x08table_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x33\n\x05table\x18\x03 \x01(\x0b\x32\x1f.google.bigtable.admin.v2.TableB\x03\xe0\x41\x02\x12J\n\x0einitial_splits\x18\x04 \x03(\x0b\x32\x32.google.bigtable.admin.v2.CreateTableRequest.Split\x1a\x14\n\x05Split\x12\x0b\n\x03key\x18\x01 \x01(\x0c"\xb4\x01\n\x1e\x43reateTableFromSnapshotRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x15\n\x08table_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x41\n\x0fsource_snapshot\x18\x03 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Snapshot"\x94\x01\n\x13\x44ropRowRangeRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x18\n\x0erow_key_prefix\x18\x02 \x01(\x0cH\x00\x12$\n\x1a\x64\x65lete_all_data_from_table\x18\x03 \x01(\x08H\x00\x42\x08\n\x06target"\xa8\x01\n\x11ListTablesRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t"^\n\x12ListTablesResponse\x12/\n\x06tables\x18\x01 \x03(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"z\n\x0fGetTableRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View"I\n\x12\x44\x65leteTableRequest\x12\x33\n\x04name\x18\x01 
\x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table"\xda\x02\n\x1bModifyColumnFamiliesRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12^\n\rmodifications\x18\x02 \x03(\x0b\x32\x42.google.bigtable.admin.v2.ModifyColumnFamiliesRequest.ModificationB\x03\xe0\x41\x02\x1a\xa5\x01\n\x0cModification\x12\n\n\x02id\x18\x01 \x01(\t\x12\x38\n\x06\x63reate\x18\x02 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x38\n\x06update\x18\x03 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x0e\n\x04\x64rop\x18\x04 \x01(\x08H\x00\x42\x05\n\x03mod"V\n\x1fGenerateConsistencyTokenRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table"=\n GenerateConsistencyTokenResponse\x12\x19\n\x11\x63onsistency_token\x18\x01 \x01(\t"n\n\x17\x43heckConsistencyRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x1e\n\x11\x63onsistency_token\x18\x02 \x01(\tB\x03\xe0\x41\x02".\n\x18\x43heckConsistencyResponse\x12\x12\n\nconsistent\x18\x01 \x01(\x08"\xdc\x01\n\x14SnapshotTableRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x38\n\x07\x63luster\x18\x02 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster\x12\x18\n\x0bsnapshot_id\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12&\n\x03ttl\x18\x04 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x13\n\x0b\x64\x65scription\x18\x05 \x01(\t"L\n\x12GetSnapshotRequest\x12\x36\n\x04name\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Snapshot"v\n\x14ListSnapshotsRequest\x12\x37\n\x06parent\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t"g\n\x15ListSnapshotsResponse\x12\x35\n\tsnapshots\x18\x01 
\x03(\x0b\x32".google.bigtable.admin.v2.Snapshot\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"O\n\x15\x44\x65leteSnapshotRequest\x12\x36\n\x04name\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Snapshot"\xc4\x01\n\x15SnapshotTableMetadata\x12H\n\x10original_request\x18\x01 \x01(\x0b\x32..google.bigtable.admin.v2.SnapshotTableRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xd8\x01\n\x1f\x43reateTableFromSnapshotMetadata\x12R\n\x10original_request\x18\x01 \x01(\x0b\x32\x38.google.bigtable.admin.v2.CreateTableFromSnapshotRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\x9d\x01\n\x13\x43reateBackupRequest\x12\x37\n\x06parent\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster\x12\x16\n\tbackup_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x35\n\x06\x62\x61\x63kup\x18\x03 \x01(\x0b\x32 .google.bigtable.admin.v2.BackupB\x03\xe0\x41\x02"\x98\x01\n\x14\x43reateBackupMetadata\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0csource_table\x18\x02 \x01(\t\x12.\n\nstart_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\x82\x01\n\x13UpdateBackupRequest\x12\x35\n\x06\x62\x61\x63kup\x18\x01 \x01(\x0b\x32 .google.bigtable.admin.v2.BackupB\x03\xe0\x41\x02\x12\x34\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02"H\n\x10GetBackupRequest\x12\x34\n\x04name\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1e\x62igtable.googleapis.com/Backup"K\n\x13\x44\x65leteBackupRequest\x12\x34\n\x04name\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1e\x62igtable.googleapis.com/Backup"\x96\x01\n\x12ListBackupsRequest\x12\x37\n\x06parent\x18\x01 
\x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster\x12\x0e\n\x06\x66ilter\x18\x02 \x01(\t\x12\x10\n\x08order_by\x18\x03 \x01(\t\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x05 \x01(\t"a\n\x13ListBackupsResponse\x12\x31\n\x07\x62\x61\x63kups\x18\x01 \x03(\x0b\x32 .google.bigtable.admin.v2.Backup\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t2\x98%\n\x12\x42igtableTableAdmin\x12\xab\x01\n\x0b\x43reateTable\x12,.google.bigtable.admin.v2.CreateTableRequest\x1a\x1f.google.bigtable.admin.v2.Table"M\x82\xd3\xe4\x93\x02/"*/v2/{parent=projects/*/instances/*}/tables:\x01*\xda\x41\x15parent,table_id,table\x12\x8a\x02\n\x17\x43reateTableFromSnapshot\x12\x38.google.bigtable.admin.v2.CreateTableFromSnapshotRequest\x1a\x1d.google.longrunning.Operation"\x95\x01\x82\xd3\xe4\x93\x02\x42"=/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot:\x01*\xda\x41\x1fparent,table_id,source_snapshot\xca\x41(\n\x05Table\x12\x1f\x43reateTableFromSnapshotMetadata\x12\xa4\x01\n\nListTables\x12+.google.bigtable.admin.v2.ListTablesRequest\x1a,.google.bigtable.admin.v2.ListTablesResponse";\x82\xd3\xe4\x93\x02,\x12*/v2/{parent=projects/*/instances/*}/tables\xda\x41\x06parent\x12\x91\x01\n\x08GetTable\x12).google.bigtable.admin.v2.GetTableRequest\x1a\x1f.google.bigtable.admin.v2.Table"9\x82\xd3\xe4\x93\x02,\x12*/v2/{name=projects/*/instances/*/tables/*}\xda\x41\x04name\x12\x8e\x01\n\x0b\x44\x65leteTable\x12,.google.bigtable.admin.v2.DeleteTableRequest\x1a\x16.google.protobuf.Empty"9\x82\xd3\xe4\x93\x02,**/v2/{name=projects/*/instances/*/tables/*}\xda\x41\x04name\x12\xcf\x01\n\x14ModifyColumnFamilies\x12\x35.google.bigtable.admin.v2.ModifyColumnFamiliesRequest\x1a\x1f.google.bigtable.admin.v2.Table"_\x82\xd3\xe4\x93\x02\x44"?/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies:\x01*\xda\x41\x12name,modifications\x12\x99\x01\n\x0c\x44ropRowRange\x12-.google.bigtable.admin.v2.DropRowRangeRequest\x1a\x16.google.protobuf.Empty"B\x82\xd3\xe4\x93\x02<"
7/v2/{name=projects/*/instances/*/tables/*}:dropRowRange:\x01*\x12\xe8\x01\n\x18GenerateConsistencyToken\x12\x39.google.bigtable.admin.v2.GenerateConsistencyTokenRequest\x1a:.google.bigtable.admin.v2.GenerateConsistencyTokenResponse"U\x82\xd3\xe4\x93\x02H"C/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken:\x01*\xda\x41\x04name\x12\xda\x01\n\x10\x43heckConsistency\x12\x31.google.bigtable.admin.v2.CheckConsistencyRequest\x1a\x32.google.bigtable.admin.v2.CheckConsistencyResponse"_\x82\xd3\xe4\x93\x02@";/v2/{name=projects/*/instances/*/tables/*}:checkConsistency:\x01*\xda\x41\x16name,consistency_token\x12\xea\x01\n\rSnapshotTable\x12..google.bigtable.admin.v2.SnapshotTableRequest\x1a\x1d.google.longrunning.Operation"\x89\x01\x82\xd3\xe4\x93\x02\x38"3/v2/{name=projects/*/instances/*/tables/*}:snapshot:\x01*\xda\x41$name,cluster,snapshot_id,description\xca\x41!\n\x08Snapshot\x12\x15SnapshotTableMetadata\x12\xa8\x01\n\x0bGetSnapshot\x12,.google.bigtable.admin.v2.GetSnapshotRequest\x1a".google.bigtable.admin.v2.Snapshot"G\x82\xd3\xe4\x93\x02:\x12\x38/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}\xda\x41\x04name\x12\xbb\x01\n\rListSnapshots\x12..google.bigtable.admin.v2.ListSnapshotsRequest\x1a/.google.bigtable.admin.v2.ListSnapshotsResponse"I\x82\xd3\xe4\x93\x02:\x12\x38/v2/{parent=projects/*/instances/*/clusters/*}/snapshots\xda\x41\x06parent\x12\xa2\x01\n\x0e\x44\x65leteSnapshot\x12/.google.bigtable.admin.v2.DeleteSnapshotRequest\x1a\x16.google.protobuf.Empty"G\x82\xd3\xe4\x93\x02:*8/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}\xda\x41\x04name\x12\xe0\x01\n\x0c\x43reateBackup\x12-.google.bigtable.admin.v2.CreateBackupRequest\x1a\x1d.google.longrunning.Operation"\x81\x01\x82\xd3\xe4\x93\x02@"6/v2/{parent=projects/*/instances/*/clusters/*}/backups:\x06\x62\x61\x63kup\xda\x41\x17parent,backup_id,backup\xca\x41\x1e\n\x06\x42\x61\x63kup\x12\x14\x43reateBackupMetadata\x12\xa0\x01\n\tGetBackup\x12*.google.bigtable.admin.v2.GetBackupRequest\
x1a .google.bigtable.admin.v2.Backup"E\x82\xd3\xe4\x93\x02\x38\x12\x36/v2/{name=projects/*/instances/*/clusters/*/backups/*}\xda\x41\x04name\x12\xc3\x01\n\x0cUpdateBackup\x12-.google.bigtable.admin.v2.UpdateBackupRequest\x1a .google.bigtable.admin.v2.Backup"b\x82\xd3\xe4\x93\x02G2=/v2/{backup.name=projects/*/instances/*/clusters/*/backups/*}:\x06\x62\x61\x63kup\xda\x41\x12\x62\x61\x63kup,update_mask\x12\x9c\x01\n\x0c\x44\x65leteBackup\x12-.google.bigtable.admin.v2.DeleteBackupRequest\x1a\x16.google.protobuf.Empty"E\x82\xd3\xe4\x93\x02\x38*6/v2/{name=projects/*/instances/*/clusters/*/backups/*}\xda\x41\x04name\x12\xb3\x01\n\x0bListBackups\x12,.google.bigtable.admin.v2.ListBackupsRequest\x1a-.google.bigtable.admin.v2.ListBackupsResponse"G\x82\xd3\xe4\x93\x02\x38\x12\x36/v2/{parent=projects/*/instances/*/clusters/*}/backups\xda\x41\x06parent\x12\xbb\x01\n\x0cRestoreTable\x12-.google.bigtable.admin.v2.RestoreTableRequest\x1a\x1d.google.longrunning.Operation"]\x82\xd3\xe4\x93\x02\x37"2/v2/{parent=projects/*/instances/*}/tables:restore:\x01*\xca\x41\x1d\n\x05Table\x12\x14RestoreTableMetadata\x12\xec\x01\n\x0cGetIamPolicy\x12".google.iam.v1.GetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"\xa0\x01\x82\xd3\xe4\x93\x02\x8e\x01";/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy:\x01*ZL"G/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:getIamPolicy:\x01*\xda\x41\x08resource\x12\xf3\x01\n\x0cSetIamPolicy\x12".google.iam.v1.SetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"\xa7\x01\x82\xd3\xe4\x93\x02\x8e\x01";/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy:\x01*ZL"G/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:setIamPolicy:\x01*\xda\x41\x0fresource,policy\x12\xa4\x02\n\x12TestIamPermissions\x12(.google.iam.v1.TestIamPermissionsRequest\x1a).google.iam.v1.TestIamPermissionsResponse"\xb8\x01\x82\xd3\xe4\x93\x02\x9a\x01"A/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions:\x01*ZR"M/v2/{resource=projects/*/instances/*/cluster
s/*/backups/*}:testIamPermissions:\x01*\xda\x41\x14resource,permissions\x1a\xde\x02\xca\x41\x1c\x62igtableadmin.googleapis.com\xd2\x41\xbb\x02https://www.googleapis.com/auth/bigtable.admin,https://www.googleapis.com/auth/bigtable.admin.table,https://www.googleapis.com/auth/cloud-bigtable.admin,https://www.googleapis.com/auth/cloud-bigtable.admin.table,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-onlyB\xdf\x01\n\x1c\x63om.google.bigtable.admin.v2B\x17\x42igtableTableAdminProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2\xea\x02"Google::Cloud::Bigtable::Admin::V2b\x06proto3', dependencies=[ google_dot_api_dot_annotations__pb2.DESCRIPTOR, google_dot_api_dot_client__pb2.DESCRIPTOR, @@ -58,6 +58,280 @@ ) +_RESTORETABLEREQUEST = _descriptor.Descriptor( + name="RestoreTableRequest", + full_name="google.bigtable.admin.v2.RestoreTableRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="parent", + full_name="google.bigtable.admin.v2.RestoreTableRequest.parent", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Instance', + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="table_id", + full_name="google.bigtable.admin.v2.RestoreTableRequest.table_id", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + 
serialized_options=b"\340A\002", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="backup", + full_name="google.bigtable.admin.v2.RestoreTableRequest.backup", + index=2, + number=3, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\372A \n\036bigtable.googleapis.com/Backup", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name="source", + full_name="google.bigtable.admin.v2.RestoreTableRequest.source", + index=0, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[], + ), + ], + serialized_start=535, + serialized_end=702, +) + + +_RESTORETABLEMETADATA = _descriptor.Descriptor( + name="RestoreTableMetadata", + full_name="google.bigtable.admin.v2.RestoreTableMetadata", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.RestoreTableMetadata.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="source_type", + full_name="google.bigtable.admin.v2.RestoreTableMetadata.source_type", + index=1, + number=2, + type=14, + cpp_type=8, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + 
containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="backup_info", + full_name="google.bigtable.admin.v2.RestoreTableMetadata.backup_info", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="optimize_table_operation_name", + full_name="google.bigtable.admin.v2.RestoreTableMetadata.optimize_table_operation_name", + index=3, + number=4, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="progress", + full_name="google.bigtable.admin.v2.RestoreTableMetadata.progress", + index=4, + number=5, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name="source_info", + full_name="google.bigtable.admin.v2.RestoreTableMetadata.source_info", + index=0, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[], + ), + ], + serialized_start=705, + serialized_end=985, +) + + 
+_OPTIMIZERESTOREDTABLEMETADATA = _descriptor.Descriptor( + name="OptimizeRestoredTableMetadata", + full_name="google.bigtable.admin.v2.OptimizeRestoredTableMetadata", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.OptimizeRestoredTableMetadata.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="progress", + full_name="google.bigtable.admin.v2.OptimizeRestoredTableMetadata.progress", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=987, + serialized_end=1095, +) + + _CREATETABLEREQUEST_SPLIT = _descriptor.Descriptor( name="Split", full_name="google.bigtable.admin.v2.CreateTableRequest.Split", @@ -94,8 +368,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=767, - serialized_end=787, + serialized_start=1330, + serialized_end=1350, ) _CREATETABLEREQUEST = _descriptor.Descriptor( @@ -191,8 +465,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=535, - serialized_end=787, + serialized_start=1098, + serialized_end=1350, ) @@ -270,8 +544,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=790, - 
serialized_end=970, + serialized_start=1353, + serialized_end=1533, ) @@ -358,8 +632,8 @@ fields=[], ), ], - serialized_start=973, - serialized_end=1121, + serialized_start=1536, + serialized_end=1684, ) @@ -456,8 +730,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1124, - serialized_end=1292, + serialized_start=1687, + serialized_end=1855, ) @@ -516,8 +790,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1294, - serialized_end=1388, + serialized_start=1857, + serialized_end=1951, ) @@ -576,8 +850,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1390, - serialized_end=1512, + serialized_start=1953, + serialized_end=2075, ) @@ -617,8 +891,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1514, - serialized_end=1587, + serialized_start=2077, + serialized_end=2150, ) @@ -724,8 +998,8 @@ fields=[], ), ], - serialized_start=1771, - serialized_end=1936, + serialized_start=2334, + serialized_end=2499, ) _MODIFYCOLUMNFAMILIESREQUEST = _descriptor.Descriptor( @@ -783,8 +1057,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1590, - serialized_end=1936, + serialized_start=2153, + serialized_end=2499, ) @@ -824,8 +1098,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1938, - serialized_end=2024, + serialized_start=2501, + serialized_end=2587, ) @@ -865,8 +1139,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2026, - serialized_end=2087, + serialized_start=2589, + serialized_end=2650, ) @@ -925,8 +1199,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2089, - serialized_end=2199, + serialized_start=2652, + serialized_end=2762, ) @@ -966,8 +1240,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2201, - serialized_end=2247, + serialized_start=2764, + serialized_end=2810, ) @@ -1083,8 +1357,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2250, - 
serialized_end=2470, + serialized_start=2813, + serialized_end=3033, ) @@ -1124,8 +1398,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2472, - serialized_end=2548, + serialized_start=3035, + serialized_end=3111, ) @@ -1203,8 +1477,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2550, - serialized_end=2668, + serialized_start=3113, + serialized_end=3231, ) @@ -1263,8 +1537,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2670, - serialized_end=2773, + serialized_start=3233, + serialized_end=3336, ) @@ -1304,8 +1578,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2775, - serialized_end=2854, + serialized_start=3338, + serialized_end=3417, ) @@ -1383,8 +1657,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2857, - serialized_end=3053, + serialized_start=3420, + serialized_end=3616, ) @@ -1462,8 +1736,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3056, - serialized_end=3272, + serialized_start=3619, + serialized_end=3835, ) @@ -1541,8 +1815,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3275, - serialized_end=3432, + serialized_start=3838, + serialized_end=3995, ) @@ -1617,269 +1891,10 @@ index=3, number=4, type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3435, - serialized_end=3587, -) - - -_GETBACKUPREQUEST = _descriptor.Descriptor( - name="GetBackupRequest", - full_name="google.bigtable.admin.v2.GetBackupRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - 
create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.GetBackupRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A \n\036bigtable.googleapis.com/Backup", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3589, - serialized_end=3661, -) - - -_UPDATEBACKUPREQUEST = _descriptor.Descriptor( - name="UpdateBackupRequest", - full_name="google.bigtable.admin.v2.UpdateBackupRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="backup", - full_name="google.bigtable.admin.v2.UpdateBackupRequest.backup", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="update_mask", - full_name="google.bigtable.admin.v2.UpdateBackupRequest.update_mask", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - 
is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3664, - serialized_end=3794, -) - - -_DELETEBACKUPREQUEST = _descriptor.Descriptor( - name="DeleteBackupRequest", - full_name="google.bigtable.admin.v2.DeleteBackupRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.DeleteBackupRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A \n\036bigtable.googleapis.com/Backup", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3796, - serialized_end=3871, -) - - -_LISTBACKUPSREQUEST = _descriptor.Descriptor( - name="ListBackupsRequest", - full_name="google.bigtable.admin.v2.ListBackupsRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.ListBackupsRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A!\n\037bigtable.googleapis.com/Cluster", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="filter", - full_name="google.bigtable.admin.v2.ListBackupsRequest.filter", - index=1, - number=2, - type=9, - 
cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="order_by", - full_name="google.bigtable.admin.v2.ListBackupsRequest.order_by", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.bigtable.admin.v2.ListBackupsRequest.page_size", - index=3, - number=4, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.bigtable.admin.v2.ListBackupsRequest.page_token", - index=4, - number=5, - type=9, - cpp_type=9, + cpp_type=10, label=1, has_default_value=False, - default_value=b"".decode("utf-8"), + default_value=None, message_type=None, enum_type=None, containing_type=None, @@ -1898,54 +1913,54 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3874, - serialized_end=4024, + serialized_start=3998, + serialized_end=4150, ) -_LISTBACKUPSRESPONSE = _descriptor.Descriptor( - name="ListBackupsResponse", - full_name="google.bigtable.admin.v2.ListBackupsResponse", +_UPDATEBACKUPREQUEST = _descriptor.Descriptor( + name="UpdateBackupRequest", + full_name="google.bigtable.admin.v2.UpdateBackupRequest", filename=None, file=DESCRIPTOR, 
containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( - name="backups", - full_name="google.bigtable.admin.v2.ListBackupsResponse.backups", + name="backup", + full_name="google.bigtable.admin.v2.UpdateBackupRequest.backup", index=0, number=1, type=11, cpp_type=10, - label=3, + label=1, has_default_value=False, - default_value=[], + default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002", file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.bigtable.admin.v2.ListBackupsResponse.next_page_token", + name="update_mask", + full_name="google.bigtable.admin.v2.UpdateBackupRequest.update_mask", index=1, number=2, - type=9, - cpp_type=9, + type=11, + cpp_type=10, label=1, has_default_value=False, - default_value=b"".decode("utf-8"), + default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002", file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), @@ -1958,22 +1973,22 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=4026, - serialized_end=4123, + serialized_start=4153, + serialized_end=4283, ) -_RESTORETABLEREQUEST = _descriptor.Descriptor( - name="RestoreTableRequest", - full_name="google.bigtable.admin.v2.RestoreTableRequest", +_GETBACKUPREQUEST = _descriptor.Descriptor( + name="GetBackupRequest", + full_name="google.bigtable.admin.v2.GetBackupRequest", filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.RestoreTableRequest.parent", + name="name", + full_name="google.bigtable.admin.v2.GetBackupRequest.name", index=0, number=1, 
type=9, @@ -1986,34 +2001,37 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="table_id", - full_name="google.bigtable.admin.v2.RestoreTableRequest.table_id", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A \n\036bigtable.googleapis.com/Backup", file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=4285, + serialized_end=4357, +) + + +_DELETEBACKUPREQUEST = _descriptor.Descriptor( + name="DeleteBackupRequest", + full_name="google.bigtable.admin.v2.DeleteBackupRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ _descriptor.FieldDescriptor( - name="backup", - full_name="google.bigtable.admin.v2.RestoreTableRequest.backup", - index=2, - number=3, + name="name", + full_name="google.bigtable.admin.v2.DeleteBackupRequest.name", + index=0, + number=1, type=9, cpp_type=9, label=1, @@ -2024,7 +2042,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A \n\036bigtable.googleapis.com/Backup", file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), @@ -2036,32 +2054,23 @@ is_extendable=False, syntax="proto3", extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="source", - full_name="google.bigtable.admin.v2.RestoreTableRequest.source", - index=0, - containing_type=None, - 
create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=4125, - serialized_end=4208, + oneofs=[], + serialized_start=4359, + serialized_end=4434, ) -_RESTORETABLEMETADATA = _descriptor.Descriptor( - name="RestoreTableMetadata", - full_name="google.bigtable.admin.v2.RestoreTableMetadata", +_LISTBACKUPSREQUEST = _descriptor.Descriptor( + name="ListBackupsRequest", + full_name="google.bigtable.admin.v2.ListBackupsRequest", filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.RestoreTableMetadata.name", + name="parent", + full_name="google.bigtable.admin.v2.ListBackupsRequest.parent", index=0, number=1, type=9, @@ -2074,20 +2083,20 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A!\n\037bigtable.googleapis.com/Cluster", file=DESCRIPTOR, create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( - name="source_type", - full_name="google.bigtable.admin.v2.RestoreTableMetadata.source_type", + name="filter", + full_name="google.bigtable.admin.v2.ListBackupsRequest.filter", index=1, number=2, - type=14, - cpp_type=8, + type=9, + cpp_type=9, label=1, has_default_value=False, - default_value=0, + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -2098,15 +2107,15 @@ create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( - name="backup_info", - full_name="google.bigtable.admin.v2.RestoreTableMetadata.backup_info", + name="order_by", + full_name="google.bigtable.admin.v2.ListBackupsRequest.order_by", index=2, number=3, - type=11, - cpp_type=10, + type=9, + cpp_type=9, label=1, has_default_value=False, - default_value=None, + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -2117,15 +2126,15 @@ 
create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( - name="optimize_table_operation_name", - full_name="google.bigtable.admin.v2.RestoreTableMetadata.optimize_table_operation_name", + name="page_size", + full_name="google.bigtable.admin.v2.ListBackupsRequest.page_size", index=3, number=4, - type=9, - cpp_type=9, + type=5, + cpp_type=1, label=1, has_default_value=False, - default_value=b"".decode("utf-8"), + default_value=0, message_type=None, enum_type=None, containing_type=None, @@ -2136,15 +2145,15 @@ create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( - name="progress", - full_name="google.bigtable.admin.v2.RestoreTableMetadata.progress", + name="page_token", + full_name="google.bigtable.admin.v2.ListBackupsRequest.page_token", index=4, number=5, - type=11, - cpp_type=10, + type=9, + cpp_type=9, label=1, has_default_value=False, - default_value=None, + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -2162,39 +2171,30 @@ is_extendable=False, syntax="proto3", extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="source_info", - full_name="google.bigtable.admin.v2.RestoreTableMetadata.source_info", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=4211, - serialized_end=4491, + oneofs=[], + serialized_start=4437, + serialized_end=4587, ) -_OPTIMIZERESTOREDTABLEMETADATA = _descriptor.Descriptor( - name="OptimizeRestoredTableMetadata", - full_name="google.bigtable.admin.v2.OptimizeRestoredTableMetadata", +_LISTBACKUPSRESPONSE = _descriptor.Descriptor( + name="ListBackupsResponse", + full_name="google.bigtable.admin.v2.ListBackupsResponse", filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.OptimizeRestoredTableMetadata.name", + name="backups", + 
full_name="google.bigtable.admin.v2.ListBackupsResponse.backups", index=0, number=1, - type=9, - cpp_type=9, - label=1, + type=11, + cpp_type=10, + label=3, has_default_value=False, - default_value=b"".decode("utf-8"), + default_value=[], message_type=None, enum_type=None, containing_type=None, @@ -2205,15 +2205,15 @@ create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( - name="progress", - full_name="google.bigtable.admin.v2.OptimizeRestoredTableMetadata.progress", + name="next_page_token", + full_name="google.bigtable.admin.v2.ListBackupsResponse.next_page_token", index=1, number=2, - type=11, - cpp_type=10, + type=9, + cpp_type=9, label=1, has_default_value=False, - default_value=None, + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -2232,10 +2232,42 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=4493, - serialized_end=4601, + serialized_start=4589, + serialized_end=4686, ) +_RESTORETABLEREQUEST.oneofs_by_name["source"].fields.append( + _RESTORETABLEREQUEST.fields_by_name["backup"] +) +_RESTORETABLEREQUEST.fields_by_name[ + "backup" +].containing_oneof = _RESTORETABLEREQUEST.oneofs_by_name["source"] +_RESTORETABLEMETADATA.fields_by_name[ + "source_type" +].enum_type = ( + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._RESTORESOURCETYPE +) +_RESTORETABLEMETADATA.fields_by_name[ + "backup_info" +].message_type = ( + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._BACKUPINFO +) +_RESTORETABLEMETADATA.fields_by_name[ + "progress" +].message_type = ( + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_common__pb2._OPERATIONPROGRESS +) +_RESTORETABLEMETADATA.oneofs_by_name["source_info"].fields.append( + _RESTORETABLEMETADATA.fields_by_name["backup_info"] +) +_RESTORETABLEMETADATA.fields_by_name[ + "backup_info" +].containing_oneof = _RESTORETABLEMETADATA.oneofs_by_name["source_info"] 
+_OPTIMIZERESTOREDTABLEMETADATA.fields_by_name[ + "progress" +].message_type = ( + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_common__pb2._OPERATIONPROGRESS +) _CREATETABLEREQUEST_SPLIT.containing_type = _CREATETABLEREQUEST _CREATETABLEREQUEST.fields_by_name[ "table" @@ -2342,50 +2374,23 @@ "end_time" ].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP _UPDATEBACKUPREQUEST.fields_by_name[ - "backup" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._BACKUP -) -_UPDATEBACKUPREQUEST.fields_by_name[ - "update_mask" -].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK -_LISTBACKUPSRESPONSE.fields_by_name[ - "backups" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._BACKUP -) -_RESTORETABLEREQUEST.oneofs_by_name["source"].fields.append( - _RESTORETABLEREQUEST.fields_by_name["backup"] -) -_RESTORETABLEREQUEST.fields_by_name[ - "backup" -].containing_oneof = _RESTORETABLEREQUEST.oneofs_by_name["source"] -_RESTORETABLEMETADATA.fields_by_name[ - "source_type" -].enum_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._RESTORESOURCETYPE -) -_RESTORETABLEMETADATA.fields_by_name[ - "backup_info" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._BACKUPINFO -) -_RESTORETABLEMETADATA.fields_by_name[ - "progress" + "backup" ].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_common__pb2._OPERATIONPROGRESS -) -_RESTORETABLEMETADATA.oneofs_by_name["source_info"].fields.append( - _RESTORETABLEMETADATA.fields_by_name["backup_info"] + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._BACKUP ) -_RESTORETABLEMETADATA.fields_by_name[ - "backup_info" -].containing_oneof = _RESTORETABLEMETADATA.oneofs_by_name["source_info"] -_OPTIMIZERESTOREDTABLEMETADATA.fields_by_name[ - "progress" +_UPDATEBACKUPREQUEST.fields_by_name[ + "update_mask" +].message_type = 
google_dot_protobuf_dot_field__mask__pb2._FIELDMASK +_LISTBACKUPSRESPONSE.fields_by_name[ + "backups" ].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_common__pb2._OPERATIONPROGRESS + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._BACKUP ) +DESCRIPTOR.message_types_by_name["RestoreTableRequest"] = _RESTORETABLEREQUEST +DESCRIPTOR.message_types_by_name["RestoreTableMetadata"] = _RESTORETABLEMETADATA +DESCRIPTOR.message_types_by_name[ + "OptimizeRestoredTableMetadata" +] = _OPTIMIZERESTOREDTABLEMETADATA DESCRIPTOR.message_types_by_name["CreateTableRequest"] = _CREATETABLEREQUEST DESCRIPTOR.message_types_by_name[ "CreateTableFromSnapshotRequest" @@ -2417,18 +2422,105 @@ ] = _CREATETABLEFROMSNAPSHOTMETADATA DESCRIPTOR.message_types_by_name["CreateBackupRequest"] = _CREATEBACKUPREQUEST DESCRIPTOR.message_types_by_name["CreateBackupMetadata"] = _CREATEBACKUPMETADATA -DESCRIPTOR.message_types_by_name["GetBackupRequest"] = _GETBACKUPREQUEST DESCRIPTOR.message_types_by_name["UpdateBackupRequest"] = _UPDATEBACKUPREQUEST +DESCRIPTOR.message_types_by_name["GetBackupRequest"] = _GETBACKUPREQUEST DESCRIPTOR.message_types_by_name["DeleteBackupRequest"] = _DELETEBACKUPREQUEST DESCRIPTOR.message_types_by_name["ListBackupsRequest"] = _LISTBACKUPSREQUEST DESCRIPTOR.message_types_by_name["ListBackupsResponse"] = _LISTBACKUPSRESPONSE -DESCRIPTOR.message_types_by_name["RestoreTableRequest"] = _RESTORETABLEREQUEST -DESCRIPTOR.message_types_by_name["RestoreTableMetadata"] = _RESTORETABLEMETADATA -DESCRIPTOR.message_types_by_name[ - "OptimizeRestoredTableMetadata" -] = _OPTIMIZERESTOREDTABLEMETADATA _sym_db.RegisterFileDescriptor(DESCRIPTOR) +RestoreTableRequest = _reflection.GeneratedProtocolMessageType( + "RestoreTableRequest", + (_message.Message,), + { + "DESCRIPTOR": _RESTORETABLEREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """The request for 
[RestoreTable][google.bigtable.admin.v2.BigtableTableA + dmin.RestoreTable]. + + Attributes: + parent: + Required. The name of the instance in which to create the + restored table. This instance must be the parent of the source + backup. Values are of the form + ``projects//instances/``. + table_id: + Required. The id of the table to create and restore to. This + table must not already exist. The ``table_id`` appended to + ``parent`` forms the full table name of the form + ``projects//instances//tables/``. + source: + Required. The source from which to restore. + backup: + Name of the backup from which to restore. Values are of the + form ``projects//instances//clusters//backups/``. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.RestoreTableRequest) + }, +) +_sym_db.RegisterMessage(RestoreTableRequest) + +RestoreTableMetadata = _reflection.GeneratedProtocolMessageType( + "RestoreTableMetadata", + (_message.Message,), + { + "DESCRIPTOR": _RESTORETABLEMETADATA, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """Metadata type for the long-running operation returned by [RestoreTable + ][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. + + Attributes: + name: + Name of the table being created and restored to. + source_type: + The type of the restore source. + source_info: + Information about the source used to restore the table, as + specified by ``source`` in [RestoreTableRequest][google.bigtab + le.admin.v2.RestoreTableRequest]. + optimize_table_operation_name: + If exists, the name of the long-running operation that will be + used to track the post-restore optimization process to + optimize the performance of the restored table. The metadata + type of the long-running operation is + [OptimizeRestoreTableMetadata][]. The response type is + [Empty][google.protobuf.Empty]. 
This long-running operation + may be automatically created by the system if applicable after + the RestoreTable long-running operation completes + successfully. This operation may not be created if the table + is already optimized or the restore was not successful. + progress: + The progress of the [RestoreTable][google.bigtable.admin.v2.Bi + gtableTableAdmin.RestoreTable] operation. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.RestoreTableMetadata) + }, +) +_sym_db.RegisterMessage(RestoreTableMetadata) + +OptimizeRestoredTableMetadata = _reflection.GeneratedProtocolMessageType( + "OptimizeRestoredTableMetadata", + (_message.Message,), + { + "DESCRIPTOR": _OPTIMIZERESTOREDTABLEMETADATA, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """Metadata type for the long-running operation used to track the + progress of optimizations performed on a newly restored table. This + long-running operation is automatically created by the system after + the successful completion of a table restore, and cannot be cancelled. + + Attributes: + name: + Name of the restored table being optimized. + progress: + The progress of the post-restore optimizations. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.OptimizeRestoredTableMetadata) + }, +) +_sym_db.RegisterMessage(OptimizeRestoredTableMetadata) + CreateTableRequest = _reflection.GeneratedProtocolMessageType( "CreateTableRequest", (_message.Message,), @@ -3048,26 +3140,6 @@ ) _sym_db.RegisterMessage(CreateBackupMetadata) -GetBackupRequest = _reflection.GeneratedProtocolMessageType( - "GetBackupRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETBACKUPREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """The request for - [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. - - Attributes: - name: - Required. Name of the backup. 
Values are of the form ``project - s/{project}/instances/{instance}/clusters/{cluster}/backups/{b - ackup}``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetBackupRequest) - }, -) -_sym_db.RegisterMessage(GetBackupRequest) - UpdateBackupRequest = _reflection.GeneratedProtocolMessageType( "UpdateBackupRequest", (_message.Message,), @@ -3096,6 +3168,26 @@ ) _sym_db.RegisterMessage(UpdateBackupRequest) +GetBackupRequest = _reflection.GeneratedProtocolMessageType( + "GetBackupRequest", + (_message.Message,), + { + "DESCRIPTOR": _GETBACKUPREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """The request for + [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. + + Attributes: + name: + Required. Name of the backup. Values are of the form ``project + s/{project}/instances/{instance}/clusters/{cluster}/backups/{b + ackup}``. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetBackupRequest) + }, +) +_sym_db.RegisterMessage(GetBackupRequest) + DeleteBackupRequest = _reflection.GeneratedProtocolMessageType( "DeleteBackupRequest", (_message.Message,), @@ -3211,100 +3303,11 @@ ) _sym_db.RegisterMessage(ListBackupsResponse) -RestoreTableRequest = _reflection.GeneratedProtocolMessageType( - "RestoreTableRequest", - (_message.Message,), - { - "DESCRIPTOR": _RESTORETABLEREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """The request for [RestoreTable][google.bigtable.admin.v2.BigtableTableA - dmin.RestoreTable]. - - Attributes: - parent: - Required. The name of the instance in which to create the - restored table. This instance must be the parent of the source - backup. Values are of the form - ``projects//instances/``. - table_id: - Required. The id of the table to create and restore to. This - table must not already exist. 
The ``table_id`` appended to - ``parent`` forms the full table name of the form - ``projects//instances//tables/``. - source: - Required. The source from which to restore. - backup: - Name of the backup from which to restore. Values are of the - form ``projects//instances//clusters//backups/``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.RestoreTableRequest) - }, -) -_sym_db.RegisterMessage(RestoreTableRequest) - -RestoreTableMetadata = _reflection.GeneratedProtocolMessageType( - "RestoreTableMetadata", - (_message.Message,), - { - "DESCRIPTOR": _RESTORETABLEMETADATA, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Metadata type for the long-running operation returned by [RestoreTable - ][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. - - Attributes: - name: - Name of the table being created and restored to. - source_type: - The type of the restore source. - source_info: - Information about the source used to restore the table, as - specified by ``source`` in [RestoreTableRequest][google.bigtab - le.admin.v2.RestoreTableRequest]. - optimize_table_operation_name: - If exists, the name of the long-running operation that will be - used to track the post-restore optimization process to - optimize the performance of the restored table. The metadata - type of the long-running operation is - [OptimizeRestoreTableMetadata][]. The response type is - [Empty][google.protobuf.Empty]. This long-running operation - may be automatically created by the system if applicable after - the RestoreTable long-running operation completes - successfully. This operation may not be created if the table - is already optimized or the restore was not successful. - progress: - The progress of the [RestoreTable][google.bigtable.admin.v2.Bi - gtableTableAdmin.RestoreTable] operation. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.RestoreTableMetadata) - }, -) -_sym_db.RegisterMessage(RestoreTableMetadata) - -OptimizeRestoredTableMetadata = _reflection.GeneratedProtocolMessageType( - "OptimizeRestoredTableMetadata", - (_message.Message,), - { - "DESCRIPTOR": _OPTIMIZERESTOREDTABLEMETADATA, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Metadata type for the long-running operation used to track the - progress of optimizations performed on a newly restored table. This - long-running operation is automatically created by the system after - the successful completion of a table restore, and cannot be cancelled. - - Attributes: - name: - Name of the restored table being optimized. - progress: - The progress of the post-restore optimizations. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.OptimizeRestoredTableMetadata) - }, -) -_sym_db.RegisterMessage(OptimizeRestoredTableMetadata) - DESCRIPTOR._options = None +_RESTORETABLEREQUEST.fields_by_name["parent"]._options = None +_RESTORETABLEREQUEST.fields_by_name["table_id"]._options = None +_RESTORETABLEREQUEST.fields_by_name["backup"]._options = None _CREATETABLEREQUEST.fields_by_name["parent"]._options = None _CREATETABLEREQUEST.fields_by_name["table_id"]._options = None _CREATETABLEREQUEST.fields_by_name["table"]._options = None @@ -3329,9 +3332,9 @@ _CREATEBACKUPREQUEST.fields_by_name["parent"]._options = None _CREATEBACKUPREQUEST.fields_by_name["backup_id"]._options = None _CREATEBACKUPREQUEST.fields_by_name["backup"]._options = None -_GETBACKUPREQUEST.fields_by_name["name"]._options = None _UPDATEBACKUPREQUEST.fields_by_name["backup"]._options = None _UPDATEBACKUPREQUEST.fields_by_name["update_mask"]._options = None +_GETBACKUPREQUEST.fields_by_name["name"]._options = None _DELETEBACKUPREQUEST.fields_by_name["name"]._options = None _LISTBACKUPSREQUEST.fields_by_name["parent"]._options = None @@ 
-3342,8 +3345,8 @@ index=0, serialized_options=b"\312A\034bigtableadmin.googleapis.com\322A\273\002https://www.googleapis.com/auth/bigtable.admin,https://www.googleapis.com/auth/bigtable.admin.table,https://www.googleapis.com/auth/cloud-bigtable.admin,https://www.googleapis.com/auth/cloud-bigtable.admin.table,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-only", create_key=_descriptor._internal_create_key, - serialized_start=4604, - serialized_end=9284, + serialized_start=4689, + serialized_end=9449, methods=[ _descriptor.MethodDescriptor( name="CreateTable", @@ -3482,7 +3485,7 @@ containing_service=None, input_type=_CREATEBACKUPREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b'\202\323\344\223\002@"6/v2/{parent=projects/*/instances/*/clusters/*}/backups:\006backup\312A\036\n\006Backup\022\024CreateBackupMetadata\332A\027parent,backup_id,backup', + serialized_options=b'\202\323\344\223\002@"6/v2/{parent=projects/*/instances/*/clusters/*}/backups:\006backup\332A\027parent,backup_id,backup\312A\036\n\006Backup\022\024CreateBackupMetadata', create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( @@ -3542,7 +3545,7 @@ containing_service=None, input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._GETIAMPOLICYREQUEST, output_type=google_dot_iam_dot_v1_dot_policy__pb2._POLICY, - serialized_options=b'\202\323\344\223\002@";/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy:\001*\332A\010resource', + serialized_options=b'\202\323\344\223\002\216\001";/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy:\001*ZL"G/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:getIamPolicy:\001*\332A\010resource', create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( diff --git a/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py b/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py 
index 3c9fb609d..b9478a4d1 100644 --- a/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py +++ b/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py @@ -286,14 +286,14 @@ def DeleteSnapshot(self, request, context): raise NotImplementedError("Method not implemented!") def CreateBackup(self, request, context): - """Starts creating a new Cloud Bigtable Backup. The returned backup + """Starts creating a new Cloud Bigtable Backup. The returned backup [long-running operation][google.longrunning.Operation] can be used to track creation of the backup. The [metadata][google.longrunning.Operation.metadata] field type is [CreateBackupMetadata][google.bigtable.admin.v2.CreateBackupMetadata]. The [response][google.longrunning.Operation.response] field type is - [Backup][google.bigtable.admin.v2.Backup], if successful. Cancelling the - returned operation will stop the creation and delete the backup. + [Backup][google.bigtable.admin.v2.Backup], if successful. Cancelling the returned operation will stop the + creation and delete the backup. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") @@ -330,11 +330,11 @@ def ListBackups(self, request, context): def RestoreTable(self, request, context): """Create a new table by restoring from a completed backup. The new table - must be in the same instance as the instance containing the backup. The + must be in the same instance as the instance containing the backup. The returned table [long-running operation][google.longrunning.Operation] can - be used to track the progress of the operation, and to cancel it. The + be used to track the progress of the operation, and to cancel it. The [metadata][google.longrunning.Operation.metadata] field type is - [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. The + [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. 
The [response][google.longrunning.Operation.response] type is [Table][google.bigtable.admin.v2.Table], if successful. """ @@ -343,7 +343,7 @@ def RestoreTable(self, request, context): raise NotImplementedError("Method not implemented!") def GetIamPolicy(self, request, context): - """Gets the access control policy for a resource. + """Gets the access control policy for a Table or Backup resource. Returns an empty policy if the resource exists but does not have a policy set. """ @@ -360,7 +360,7 @@ def SetIamPolicy(self, request, context): raise NotImplementedError("Method not implemented!") def TestIamPermissions(self, request, context): - """Returns permissions that the caller has on the specified table resource. + """Returns permissions that the caller has on the specified Table or Backup resource. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") diff --git a/synth.metadata b/synth.metadata index 37252488b..489a8a064 100644 --- a/synth.metadata +++ b/synth.metadata @@ -11,8 +11,8 @@ "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "20b11dfe4538cd5da7b4c3dd7d2bf5b9922ff3ed", - "internalRef": "338646463" + "sha": "ccd6462d31e6422fd188b6590aa8d0ad03e7d9a3", + "internalRef": "339464550" } }, { diff --git a/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py b/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py index 035b5aa00..427e05c9b 100644 --- a/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py +++ b/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py @@ -774,12 +774,13 @@ def test_delete_app_profile(self): # Setup Request name = client.app_profile_path("[PROJECT]", "[INSTANCE]", "[APP_PROFILE]") + ignore_warnings = True - client.delete_app_profile(name) + client.delete_app_profile(name, ignore_warnings) assert len(channel.requests) == 1 expected_request = bigtable_instance_admin_pb2.DeleteAppProfileRequest( - name=name 
+ name=name, ignore_warnings=ignore_warnings ) actual_request = channel.requests[0][1] assert expected_request == actual_request @@ -794,9 +795,10 @@ def test_delete_app_profile_exception(self): # Setup request name = client.app_profile_path("[PROJECT]", "[INSTANCE]", "[APP_PROFILE]") + ignore_warnings = True with pytest.raises(CustomException): - client.delete_app_profile(name) + client.delete_app_profile(name, ignore_warnings) def test_get_iam_policy(self): # Setup Expected Response diff --git a/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py b/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py index 72719a0de..48e67ae22 100644 --- a/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py +++ b/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py @@ -1010,12 +1010,18 @@ def test_restore_table(self): create_channel.return_value = channel client = bigtable_admin_v2.BigtableTableAdminClient() - response = client.restore_table() + # Setup Request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + table_id = "tableId-895419604" + + response = client.restore_table(parent, table_id) result = response.result() assert expected_response == result assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.RestoreTableRequest() + expected_request = bigtable_table_admin_pb2.RestoreTableRequest( + parent=parent, table_id=table_id + ) actual_request = channel.requests[0][1] assert expected_request == actual_request @@ -1034,6 +1040,10 @@ def test_restore_table_exception(self): create_channel.return_value = channel client = bigtable_admin_v2.BigtableTableAdminClient() - response = client.restore_table() + # Setup Request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + table_id = "tableId-895419604" + + response = client.restore_table(parent, table_id) exception = response.exception() assert exception.errors[0] == error From 23e2d1ad902d9788bedce71327347457b67e944d Mon Sep 17 00:00:00 2001 From: 
yoshi-automation Date: Sat, 13 Feb 2021 06:07:51 -0800 Subject: [PATCH 07/11] fix!: removed ImportAgentResponse which was not ready for use yet. feat: added labels field and description field into Intent and Dtmf input to QueryInput PiperOrigin-RevId: 339485389 Source-Author: Google APIs Source-Date: Wed Oct 28 10:20:50 2020 -0700 Source-Repo: googleapis/googleapis Source-Sha: 52eaab535740117d17b81a43749cf2a16110c301 Source-Link: https://github.com/googleapis/googleapis/commit/52eaab535740117d17b81a43749cf2a16110c301 --- synth.metadata | 9 +- .../unit/gapic/v2/test_bigtable_client_v2.py | 316 ----- .../test_bigtable_instance_admin_client_v2.py | 926 --------------- .../v2/test_bigtable_table_admin_client_v2.py | 1049 ----------------- 4 files changed, 3 insertions(+), 2297 deletions(-) delete mode 100644 tests/unit/gapic/v2/test_bigtable_client_v2.py delete mode 100644 tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py delete mode 100644 tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py diff --git a/synth.metadata b/synth.metadata index 489a8a064..71ea30bb3 100644 --- a/synth.metadata +++ b/synth.metadata @@ -11,8 +11,8 @@ "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "ccd6462d31e6422fd188b6590aa8d0ad03e7d9a3", - "internalRef": "339464550" + "sha": "52eaab535740117d17b81a43749cf2a16110c301", + "internalRef": "339485389" } }, { @@ -167,9 +167,6 @@ "scripts/readme-gen/templates/install_deps.tmpl.rst", "scripts/readme-gen/templates/install_portaudio.tmpl.rst", "setup.cfg", - "testing/.gitignore", - "tests/unit/gapic/v2/test_bigtable_client_v2.py", - "tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py", - "tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py" + "testing/.gitignore" ] } \ No newline at end of file diff --git a/tests/unit/gapic/v2/test_bigtable_client_v2.py b/tests/unit/gapic/v2/test_bigtable_client_v2.py deleted file mode 100644 index 84abfecef..000000000 
--- a/tests/unit/gapic/v2/test_bigtable_client_v2.py +++ /dev/null @@ -1,316 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Unit tests.""" - -import mock -import pytest - -from google.cloud import bigtable_v2 -from google.cloud.bigtable_v2.proto import bigtable_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - def unary_stream(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestBigtableClient(object): - def test_read_rows(self): - # Setup Expected Response - last_scanned_row_key = b"-126" - expected_response 
= {"last_scanned_row_key": last_scanned_row_key} - expected_response = bigtable_pb2.ReadRowsResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[iter([expected_response])]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup Request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - response = client.read_rows(table_name) - resources = list(response) - assert len(resources) == 1 - assert expected_response == resources[0] - - assert len(channel.requests) == 1 - expected_request = bigtable_pb2.ReadRowsRequest(table_name=table_name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_read_rows_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - with pytest.raises(CustomException): - client.read_rows(table_name) - - def test_sample_row_keys(self): - # Setup Expected Response - row_key = b"122" - offset_bytes = 889884095 - expected_response = {"row_key": row_key, "offset_bytes": offset_bytes} - expected_response = bigtable_pb2.SampleRowKeysResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[iter([expected_response])]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup Request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - response = client.sample_row_keys(table_name) - resources = list(response) - 
assert len(resources) == 1 - assert expected_response == resources[0] - - assert len(channel.requests) == 1 - expected_request = bigtable_pb2.SampleRowKeysRequest(table_name=table_name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_sample_row_keys_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - with pytest.raises(CustomException): - client.sample_row_keys(table_name) - - def test_mutate_row(self): - # Setup Expected Response - expected_response = {} - expected_response = bigtable_pb2.MutateRowResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup Request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - row_key = b"122" - mutations = [] - - response = client.mutate_row(table_name, row_key, mutations) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_pb2.MutateRowRequest( - table_name=table_name, row_key=row_key, mutations=mutations - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_mutate_row_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup request - table_name = 
client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - row_key = b"122" - mutations = [] - - with pytest.raises(CustomException): - client.mutate_row(table_name, row_key, mutations) - - def test_mutate_rows(self): - # Setup Expected Response - expected_response = {} - expected_response = bigtable_pb2.MutateRowsResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[iter([expected_response])]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup Request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - entries = [] - - response = client.mutate_rows(table_name, entries) - resources = list(response) - assert len(resources) == 1 - assert expected_response == resources[0] - - assert len(channel.requests) == 1 - expected_request = bigtable_pb2.MutateRowsRequest( - table_name=table_name, entries=entries - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_mutate_rows_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - entries = [] - - with pytest.raises(CustomException): - client.mutate_rows(table_name, entries) - - def test_check_and_mutate_row(self): - # Setup Expected Response - predicate_matched = True - expected_response = {"predicate_matched": predicate_matched} - expected_response = bigtable_pb2.CheckAndMutateRowResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as 
create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup Request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - row_key = b"122" - - response = client.check_and_mutate_row(table_name, row_key) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_pb2.CheckAndMutateRowRequest( - table_name=table_name, row_key=row_key - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_check_and_mutate_row_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - row_key = b"122" - - with pytest.raises(CustomException): - client.check_and_mutate_row(table_name, row_key) - - def test_read_modify_write_row(self): - # Setup Expected Response - expected_response = {} - expected_response = bigtable_pb2.ReadModifyWriteRowResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup Request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - row_key = b"122" - rules = [] - - response = client.read_modify_write_row(table_name, row_key, rules) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_pb2.ReadModifyWriteRowRequest( - table_name=table_name, row_key=row_key, rules=rules - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def 
test_read_modify_write_row_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - row_key = b"122" - rules = [] - - with pytest.raises(CustomException): - client.read_modify_write_row(table_name, row_key, rules) diff --git a/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py b/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py deleted file mode 100644 index 427e05c9b..000000000 --- a/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py +++ /dev/null @@ -1,926 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Unit tests.""" - -import mock -import pytest - -from google.rpc import status_pb2 - -from google.cloud import bigtable_admin_v2 -from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2 -from google.cloud.bigtable_admin_v2.proto import instance_pb2 -from google.iam.v1 import iam_policy_pb2 -from google.iam.v1 import policy_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestBigtableInstanceAdminClient(object): - def test_create_instance(self): - # Setup Expected Response - name = "name3373707" - display_name = "displayName1615086568" - expected_response = {"name": name, "display_name": display_name} - expected_response = instance_pb2.Instance(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_create_instance", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - 
create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.project_path("[PROJECT]") - instance_id = "instanceId-2101995259" - instance = {} - clusters = {} - - response = client.create_instance(parent, instance_id, instance, clusters) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.CreateInstanceRequest( - parent=parent, instance_id=instance_id, instance=instance, clusters=clusters - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_instance_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_create_instance_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.project_path("[PROJECT]") - instance_id = "instanceId-2101995259" - instance = {} - clusters = {} - - response = client.create_instance(parent, instance_id, instance, clusters) - exception = response.exception() - assert exception.errors[0] == error - - def test_get_instance(self): - # Setup Expected Response - name_2 = "name2-1052831874" - display_name = "displayName1615086568" - expected_response = {"name": name_2, "display_name": display_name} - expected_response = instance_pb2.Instance(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = 
bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - name = client.instance_path("[PROJECT]", "[INSTANCE]") - - response = client.get_instance(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.GetInstanceRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_instance_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - name = client.instance_path("[PROJECT]", "[INSTANCE]") - - with pytest.raises(CustomException): - client.get_instance(name) - - def test_list_instances(self): - # Setup Expected Response - next_page_token = "nextPageToken-1530815211" - expected_response = {"next_page_token": next_page_token} - expected_response = bigtable_instance_admin_pb2.ListInstancesResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.project_path("[PROJECT]") - - response = client.list_instances(parent) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.ListInstancesRequest( - parent=parent - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_instances_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as 
create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - parent = client.project_path("[PROJECT]") - - with pytest.raises(CustomException): - client.list_instances(parent) - - def test_update_instance(self): - # Setup Expected Response - name = "name3373707" - display_name_2 = "displayName21615000987" - expected_response = {"name": name, "display_name": display_name_2} - expected_response = instance_pb2.Instance(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - display_name = "displayName1615086568" - - response = client.update_instance(display_name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = instance_pb2.Instance(display_name=display_name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_instance_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - display_name = "displayName1615086568" - - with pytest.raises(CustomException): - client.update_instance(display_name) - - def test_partial_update_instance(self): - # Setup Expected Response - name = "name3373707" - display_name = "displayName1615086568" - expected_response = {"name": name, "display_name": display_name} - expected_response = instance_pb2.Instance(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_partial_update_instance", done=True - ) - 
operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - instance = {} - update_mask = {} - - response = client.partial_update_instance(instance, update_mask) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.PartialUpdateInstanceRequest( - instance=instance, update_mask=update_mask - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_partial_update_instance_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_partial_update_instance_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - instance = {} - update_mask = {} - - response = client.partial_update_instance(instance, update_mask) - exception = response.exception() - assert exception.errors[0] == error - - def test_delete_instance(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - name = client.instance_path("[PROJECT]", "[INSTANCE]") - - client.delete_instance(name) - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.DeleteInstanceRequest(name=name) - actual_request = 
channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_instance_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - name = client.instance_path("[PROJECT]", "[INSTANCE]") - - with pytest.raises(CustomException): - client.delete_instance(name) - - def test_create_cluster(self): - # Setup Expected Response - name = "name3373707" - location = "location1901043637" - serve_nodes = 1288838783 - expected_response = { - "name": name, - "location": location, - "serve_nodes": serve_nodes, - } - expected_response = instance_pb2.Cluster(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_create_cluster", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - cluster_id = "clusterId240280960" - cluster = {} - - response = client.create_cluster(parent, cluster_id, cluster) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.CreateClusterRequest( - parent=parent, cluster_id=cluster_id, cluster=cluster - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_cluster_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_create_cluster_exception", done=True - ) - 
operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - cluster_id = "clusterId240280960" - cluster = {} - - response = client.create_cluster(parent, cluster_id, cluster) - exception = response.exception() - assert exception.errors[0] == error - - def test_get_cluster(self): - # Setup Expected Response - name_2 = "name2-1052831874" - location = "location1901043637" - serve_nodes = 1288838783 - expected_response = { - "name": name_2, - "location": location, - "serve_nodes": serve_nodes, - } - expected_response = instance_pb2.Cluster(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - - response = client.get_cluster(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.GetClusterRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_cluster_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - - with pytest.raises(CustomException): - 
client.get_cluster(name) - - def test_list_clusters(self): - # Setup Expected Response - next_page_token = "nextPageToken-1530815211" - expected_response = {"next_page_token": next_page_token} - expected_response = bigtable_instance_admin_pb2.ListClustersResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - - response = client.list_clusters(parent) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.ListClustersRequest( - parent=parent - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_clusters_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - - with pytest.raises(CustomException): - client.list_clusters(parent) - - def test_update_cluster(self): - # Setup Expected Response - name = "name3373707" - location = "location1901043637" - serve_nodes_2 = 1623486220 - expected_response = { - "name": name, - "location": location, - "serve_nodes": serve_nodes_2, - } - expected_response = instance_pb2.Cluster(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_update_cluster", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = 
mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - serve_nodes = 1288838783 - - response = client.update_cluster(serve_nodes) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = instance_pb2.Cluster(serve_nodes=serve_nodes) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_cluster_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_update_cluster_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - serve_nodes = 1288838783 - - response = client.update_cluster(serve_nodes) - exception = response.exception() - assert exception.errors[0] == error - - def test_delete_cluster(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - - client.delete_cluster(name) - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.DeleteClusterRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_cluster_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") 
- with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - - with pytest.raises(CustomException): - client.delete_cluster(name) - - def test_create_app_profile(self): - # Setup Expected Response - name = "name3373707" - etag = "etag3123477" - description = "description-1724546052" - expected_response = {"name": name, "etag": etag, "description": description} - expected_response = instance_pb2.AppProfile(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - app_profile_id = "appProfileId1262094415" - app_profile = {} - - response = client.create_app_profile(parent, app_profile_id, app_profile) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.CreateAppProfileRequest( - parent=parent, app_profile_id=app_profile_id, app_profile=app_profile - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_app_profile_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - app_profile_id = "appProfileId1262094415" - app_profile = {} - - with pytest.raises(CustomException): - client.create_app_profile(parent, app_profile_id, app_profile) - - def 
test_get_app_profile(self): - # Setup Expected Response - name_2 = "name2-1052831874" - etag = "etag3123477" - description = "description-1724546052" - expected_response = {"name": name_2, "etag": etag, "description": description} - expected_response = instance_pb2.AppProfile(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - name = client.app_profile_path("[PROJECT]", "[INSTANCE]", "[APP_PROFILE]") - - response = client.get_app_profile(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.GetAppProfileRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_app_profile_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - name = client.app_profile_path("[PROJECT]", "[INSTANCE]", "[APP_PROFILE]") - - with pytest.raises(CustomException): - client.get_app_profile(name) - - def test_list_app_profiles(self): - # Setup Expected Response - next_page_token = "" - app_profiles_element = {} - app_profiles = [app_profiles_element] - expected_response = { - "next_page_token": next_page_token, - "app_profiles": app_profiles, - } - expected_response = bigtable_instance_admin_pb2.ListAppProfilesResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: 
- create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - - paged_list_response = client.list_app_profiles(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.app_profiles[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.ListAppProfilesRequest( - parent=parent - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_app_profiles_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - - paged_list_response = client.list_app_profiles(parent) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_update_app_profile(self): - # Setup Expected Response - name = "name3373707" - etag = "etag3123477" - description = "description-1724546052" - expected_response = {"name": name, "etag": etag, "description": description} - expected_response = instance_pb2.AppProfile(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_update_app_profile", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - app_profile = {} - update_mask = {} - - response = client.update_app_profile(app_profile, update_mask) - result = response.result() - assert expected_response == result 
- - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.UpdateAppProfileRequest( - app_profile=app_profile, update_mask=update_mask - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_app_profile_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_update_app_profile_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - app_profile = {} - update_mask = {} - - response = client.update_app_profile(app_profile, update_mask) - exception = response.exception() - assert exception.errors[0] == error - - def test_delete_app_profile(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - name = client.app_profile_path("[PROJECT]", "[INSTANCE]", "[APP_PROFILE]") - ignore_warnings = True - - client.delete_app_profile(name, ignore_warnings) - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.DeleteAppProfileRequest( - name=name, ignore_warnings=ignore_warnings - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_app_profile_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup 
request - name = client.app_profile_path("[PROJECT]", "[INSTANCE]", "[APP_PROFILE]") - ignore_warnings = True - - with pytest.raises(CustomException): - client.delete_app_profile(name, ignore_warnings) - - def test_get_iam_policy(self): - # Setup Expected Response - version = 351608024 - etag = b"21" - expected_response = {"version": version, "etag": etag} - expected_response = policy_pb2.Policy(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - resource = "resource-341064690" - - response = client.get_iam_policy(resource) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = iam_policy_pb2.GetIamPolicyRequest(resource=resource) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_iam_policy_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - resource = "resource-341064690" - - with pytest.raises(CustomException): - client.get_iam_policy(resource) - - def test_set_iam_policy(self): - # Setup Expected Response - version = 351608024 - etag = b"21" - expected_response = {"version": version, "etag": etag} - expected_response = policy_pb2.Policy(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - 
# Setup Request - resource = "resource-341064690" - policy = {} - - response = client.set_iam_policy(resource, policy) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = iam_policy_pb2.SetIamPolicyRequest( - resource=resource, policy=policy - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_set_iam_policy_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - resource = "resource-341064690" - policy = {} - - with pytest.raises(CustomException): - client.set_iam_policy(resource, policy) - - def test_test_iam_permissions(self): - # Setup Expected Response - expected_response = {} - expected_response = iam_policy_pb2.TestIamPermissionsResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - resource = "resource-341064690" - permissions = [] - - response = client.test_iam_permissions(resource, permissions) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = iam_policy_pb2.TestIamPermissionsRequest( - resource=resource, permissions=permissions - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_test_iam_permissions_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = 
channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - resource = "resource-341064690" - permissions = [] - - with pytest.raises(CustomException): - client.test_iam_permissions(resource, permissions) diff --git a/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py b/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py deleted file mode 100644 index 48e67ae22..000000000 --- a/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py +++ /dev/null @@ -1,1049 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Unit tests.""" - -import mock -import pytest - -from google.rpc import status_pb2 - -from google.cloud import bigtable_admin_v2 -from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2 -from google.cloud.bigtable_admin_v2.proto import table_pb2 -from google.iam.v1 import iam_policy_pb2 -from google.iam.v1 import policy_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestBigtableTableAdminClient(object): - def test_create_table(self): - # Setup Expected Response - name = "name3373707" - expected_response = {"name": name} - expected_response = table_pb2.Table(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - table_id = "tableId-895419604" - table = {} - - response 
= client.create_table(parent, table_id, table) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.CreateTableRequest( - parent=parent, table_id=table_id, table=table - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_table_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - table_id = "tableId-895419604" - table = {} - - with pytest.raises(CustomException): - client.create_table(parent, table_id, table) - - def test_create_table_from_snapshot(self): - # Setup Expected Response - name = "name3373707" - expected_response = {"name": name} - expected_response = table_pb2.Table(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_create_table_from_snapshot", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - table_id = "tableId-895419604" - source_snapshot = client.snapshot_path( - "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" - ) - - response = client.create_table_from_snapshot(parent, table_id, source_snapshot) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.CreateTableFromSnapshotRequest( - parent=parent, table_id=table_id, 
source_snapshot=source_snapshot - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_table_from_snapshot_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_create_table_from_snapshot_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - table_id = "tableId-895419604" - source_snapshot = client.snapshot_path( - "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" - ) - - response = client.create_table_from_snapshot(parent, table_id, source_snapshot) - exception = response.exception() - assert exception.errors[0] == error - - def test_list_tables(self): - # Setup Expected Response - next_page_token = "" - tables_element = {} - tables = [tables_element] - expected_response = {"next_page_token": next_page_token, "tables": tables} - expected_response = bigtable_table_admin_pb2.ListTablesResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - - paged_list_response = client.list_tables(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.tables[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.ListTablesRequest(parent=parent) - actual_request = 
channel.requests[0][1] - assert expected_request == actual_request - - def test_list_tables_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - - paged_list_response = client.list_tables(parent) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_get_table(self): - # Setup Expected Response - name_2 = "name2-1052831874" - expected_response = {"name": name_2} - expected_response = table_pb2.Table(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - response = client.get_table(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.GetTableRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_table_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - with pytest.raises(CustomException): - client.get_table(name) - - def test_delete_table(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as 
create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - client.delete_table(name) - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.DeleteTableRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_table_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - with pytest.raises(CustomException): - client.delete_table(name) - - def test_modify_column_families(self): - # Setup Expected Response - name_2 = "name2-1052831874" - expected_response = {"name": name_2} - expected_response = table_pb2.Table(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - modifications = [] - - response = client.modify_column_families(name, modifications) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.ModifyColumnFamiliesRequest( - name=name, modifications=modifications - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_modify_column_families_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = 
mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - modifications = [] - - with pytest.raises(CustomException): - client.modify_column_families(name, modifications) - - def test_drop_row_range(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - client.drop_row_range(name) - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.DropRowRangeRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_drop_row_range_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - with pytest.raises(CustomException): - client.drop_row_range(name) - - def test_generate_consistency_token(self): - # Setup Expected Response - consistency_token = "consistencyToken-1090516718" - expected_response = {"consistency_token": consistency_token} - expected_response = bigtable_table_admin_pb2.GenerateConsistencyTokenResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = 
bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - response = client.generate_consistency_token(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.GenerateConsistencyTokenRequest( - name=name - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_generate_consistency_token_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - with pytest.raises(CustomException): - client.generate_consistency_token(name) - - def test_check_consistency(self): - # Setup Expected Response - consistent = True - expected_response = {"consistent": consistent} - expected_response = bigtable_table_admin_pb2.CheckConsistencyResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - consistency_token = "consistencyToken-1090516718" - - response = client.check_consistency(name, consistency_token) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.CheckConsistencyRequest( - name=name, consistency_token=consistency_token - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_check_consistency_exception(self): - # 
Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - consistency_token = "consistencyToken-1090516718" - - with pytest.raises(CustomException): - client.check_consistency(name, consistency_token) - - def test_get_iam_policy(self): - # Setup Expected Response - version = 351608024 - etag = b"21" - expected_response = {"version": version, "etag": etag} - expected_response = policy_pb2.Policy(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - resource = "resource-341064690" - - response = client.get_iam_policy(resource) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = iam_policy_pb2.GetIamPolicyRequest(resource=resource) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_iam_policy_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - resource = "resource-341064690" - - with pytest.raises(CustomException): - client.get_iam_policy(resource) - - def test_set_iam_policy(self): - # Setup Expected Response - version = 351608024 - etag = b"21" - expected_response = {"version": version, "etag": etag} - expected_response = 
policy_pb2.Policy(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - resource = "resource-341064690" - policy = {} - - response = client.set_iam_policy(resource, policy) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = iam_policy_pb2.SetIamPolicyRequest( - resource=resource, policy=policy - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_set_iam_policy_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - resource = "resource-341064690" - policy = {} - - with pytest.raises(CustomException): - client.set_iam_policy(resource, policy) - - def test_test_iam_permissions(self): - # Setup Expected Response - expected_response = {} - expected_response = iam_policy_pb2.TestIamPermissionsResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - resource = "resource-341064690" - permissions = [] - - response = client.test_iam_permissions(resource, permissions) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = iam_policy_pb2.TestIamPermissionsRequest( - resource=resource, permissions=permissions - ) - actual_request = 
channel.requests[0][1] - assert expected_request == actual_request - - def test_test_iam_permissions_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - resource = "resource-341064690" - permissions = [] - - with pytest.raises(CustomException): - client.test_iam_permissions(resource, permissions) - - def test_snapshot_table(self): - # Setup Expected Response - name_2 = "name2-1052831874" - data_size_bytes = 2110122398 - description = "description-1724546052" - expected_response = { - "name": name_2, - "data_size_bytes": data_size_bytes, - "description": description, - } - expected_response = table_pb2.Snapshot(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_snapshot_table", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - cluster = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - snapshot_id = "snapshotId-168585866" - - response = client.snapshot_table(name, cluster, snapshot_id) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.SnapshotTableRequest( - name=name, cluster=cluster, snapshot_id=snapshot_id - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_snapshot_table_exception(self): - # Setup Response - error = status_pb2.Status() - operation = 
operations_pb2.Operation( - name="operations/test_snapshot_table_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - cluster = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - snapshot_id = "snapshotId-168585866" - - response = client.snapshot_table(name, cluster, snapshot_id) - exception = response.exception() - assert exception.errors[0] == error - - def test_get_snapshot(self): - # Setup Expected Response - name_2 = "name2-1052831874" - data_size_bytes = 2110122398 - description = "description-1724546052" - expected_response = { - "name": name_2, - "data_size_bytes": data_size_bytes, - "description": description, - } - expected_response = table_pb2.Snapshot(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.snapshot_path( - "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" - ) - - response = client.get_snapshot(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.GetSnapshotRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_snapshot_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = 
channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.snapshot_path( - "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" - ) - - with pytest.raises(CustomException): - client.get_snapshot(name) - - def test_list_snapshots(self): - # Setup Expected Response - next_page_token = "" - snapshots_element = {} - snapshots = [snapshots_element] - expected_response = {"next_page_token": next_page_token, "snapshots": snapshots} - expected_response = bigtable_table_admin_pb2.ListSnapshotsResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - - paged_list_response = client.list_snapshots(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.snapshots[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.ListSnapshotsRequest(parent=parent) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_snapshots_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - - paged_list_response = client.list_snapshots(parent) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_delete_snapshot(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - 
create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.snapshot_path( - "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" - ) - - client.delete_snapshot(name) - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.DeleteSnapshotRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_snapshot_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.snapshot_path( - "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" - ) - - with pytest.raises(CustomException): - client.delete_snapshot(name) - - def test_create_backup(self): - # Setup Expected Response - name = "name3373707" - source_table = "sourceTable1670858410" - size_bytes = 1796325715 - expected_response = { - "name": name, - "source_table": source_table, - "size_bytes": size_bytes, - } - expected_response = table_pb2.Backup(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_create_backup", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - backup_id = "backupId1355353272" - backup = {} - - response = client.create_backup(parent, backup_id, backup) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = 
bigtable_table_admin_pb2.CreateBackupRequest( - parent=parent, backup_id=backup_id, backup=backup - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_backup_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_create_backup_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - backup_id = "backupId1355353272" - backup = {} - - response = client.create_backup(parent, backup_id, backup) - exception = response.exception() - assert exception.errors[0] == error - - def test_get_backup(self): - # Setup Expected Response - name_2 = "name2-1052831874" - source_table = "sourceTable1670858410" - size_bytes = 1796325715 - expected_response = { - "name": name_2, - "source_table": source_table, - "size_bytes": size_bytes, - } - expected_response = table_pb2.Backup(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.backup_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[BACKUP]") - - response = client.get_backup(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.GetBackupRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_backup_exception(self): - # Mock the 
API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.backup_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[BACKUP]") - - with pytest.raises(CustomException): - client.get_backup(name) - - def test_list_backups(self): - # Setup Expected Response - next_page_token = "" - backups_element = {} - backups = [backups_element] - expected_response = {"next_page_token": next_page_token, "backups": backups} - expected_response = bigtable_table_admin_pb2.ListBackupsResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - - paged_list_response = client.list_backups(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.backups[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.ListBackupsRequest(parent=parent) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_backups_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - - paged_list_response = client.list_backups(parent) - with pytest.raises(CustomException): - list(paged_list_response) 
- - def test_update_backup(self): - # Setup Expected Response - name = "name3373707" - source_table = "sourceTable1670858410" - size_bytes = 1796325715 - expected_response = { - "name": name, - "source_table": source_table, - "size_bytes": size_bytes, - } - expected_response = table_pb2.Backup(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - backup = {} - update_mask = {} - - response = client.update_backup(backup, update_mask) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.UpdateBackupRequest( - backup=backup, update_mask=update_mask - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_backup_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - backup = {} - update_mask = {} - - with pytest.raises(CustomException): - client.update_backup(backup, update_mask) - - def test_delete_backup(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.backup_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[BACKUP]") - - client.delete_backup(name) - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.DeleteBackupRequest(name=name) - actual_request = channel.requests[0][1] - assert 
expected_request == actual_request - - def test_delete_backup_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.backup_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[BACKUP]") - - with pytest.raises(CustomException): - client.delete_backup(name) - - def test_restore_table(self): - # Setup Expected Response - name = "name3373707" - expected_response = {"name": name} - expected_response = table_pb2.Table(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_restore_table", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - table_id = "tableId-895419604" - - response = client.restore_table(parent, table_id) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.RestoreTableRequest( - parent=parent, table_id=table_id - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_restore_table_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_restore_table_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - 
create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - table_id = "tableId-895419604" - - response = client.restore_table(parent, table_id) - exception = response.exception() - assert exception.errors[0] == error From 66f4852c573fd6462696982cc0c37a0309215df5 Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Sat, 13 Feb 2021 06:08:25 -0800 Subject: [PATCH 08/11] fix!: update package names to avoid conflict with google-cloud-bigquery BREAKING CHANGE: update package names to avoid conflict with google-cloud-bigquery The google-cloud-bigquery package uses the `google.cloud.bigquery` path as a plain Python module, not a namespace package. When this package and google-cloud-bigquery are installed in the same environment, conflicts can result. PiperOrigin-RevId: 339505822 Source-Author: Google APIs Source-Date: Wed Oct 28 11:47:59 2020 -0700 Source-Repo: googleapis/googleapis Source-Sha: ae77417a40098bf64d04047760153f47daf2975f Source-Link: https://github.com/googleapis/googleapis/commit/ae77417a40098bf64d04047760153f47daf2975f --- synth.metadata | 9 +- .../unit/gapic/v2/test_bigtable_client_v2.py | 316 +++++ .../test_bigtable_instance_admin_client_v2.py | 926 +++++++++++++++ .../v2/test_bigtable_table_admin_client_v2.py | 1049 +++++++++++++++++ 4 files changed, 2297 insertions(+), 3 deletions(-) create mode 100644 tests/unit/gapic/v2/test_bigtable_client_v2.py create mode 100644 tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py create mode 100644 tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py diff --git a/synth.metadata b/synth.metadata index 71ea30bb3..dc119f8b1 100644 --- a/synth.metadata +++ b/synth.metadata @@ -11,8 +11,8 @@ "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "52eaab535740117d17b81a43749cf2a16110c301", - "internalRef": "339485389" + "sha": 
"ae77417a40098bf64d04047760153f47daf2975f", + "internalRef": "339505822" } }, { @@ -167,6 +167,9 @@ "scripts/readme-gen/templates/install_deps.tmpl.rst", "scripts/readme-gen/templates/install_portaudio.tmpl.rst", "setup.cfg", - "testing/.gitignore" + "testing/.gitignore", + "tests/unit/gapic/v2/test_bigtable_client_v2.py", + "tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py", + "tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py" ] } \ No newline at end of file diff --git a/tests/unit/gapic/v2/test_bigtable_client_v2.py b/tests/unit/gapic/v2/test_bigtable_client_v2.py new file mode 100644 index 000000000..84abfecef --- /dev/null +++ b/tests/unit/gapic/v2/test_bigtable_client_v2.py @@ -0,0 +1,316 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Unit tests.""" + +import mock +import pytest + +from google.cloud import bigtable_v2 +from google.cloud.bigtable_v2.proto import bigtable_pb2 + + +class MultiCallableStub(object): + """Stub for the grpc.UnaryUnaryMultiCallable interface.""" + + def __init__(self, method, channel_stub): + self.method = method + self.channel_stub = channel_stub + + def __call__(self, request, timeout=None, metadata=None, credentials=None): + self.channel_stub.requests.append((self.method, request)) + + response = None + if self.channel_stub.responses: + response = self.channel_stub.responses.pop() + + if isinstance(response, Exception): + raise response + + if response: + return response + + +class ChannelStub(object): + """Stub for the grpc.Channel interface.""" + + def __init__(self, responses=[]): + self.responses = responses + self.requests = [] + + def unary_unary(self, method, request_serializer=None, response_deserializer=None): + return MultiCallableStub(method, self) + + def unary_stream(self, method, request_serializer=None, response_deserializer=None): + return MultiCallableStub(method, self) + + +class CustomException(Exception): + pass + + +class TestBigtableClient(object): + def test_read_rows(self): + # Setup Expected Response + last_scanned_row_key = b"-126" + expected_response = {"last_scanned_row_key": last_scanned_row_key} + expected_response = bigtable_pb2.ReadRowsResponse(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[iter([expected_response])]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_v2.BigtableClient() + + # Setup Request + table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + + response = client.read_rows(table_name) + resources = list(response) + assert len(resources) == 1 + assert expected_response == resources[0] + + assert len(channel.requests) == 1 + expected_request = 
bigtable_pb2.ReadRowsRequest(table_name=table_name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_read_rows_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_v2.BigtableClient() + + # Setup request + table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + + with pytest.raises(CustomException): + client.read_rows(table_name) + + def test_sample_row_keys(self): + # Setup Expected Response + row_key = b"122" + offset_bytes = 889884095 + expected_response = {"row_key": row_key, "offset_bytes": offset_bytes} + expected_response = bigtable_pb2.SampleRowKeysResponse(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[iter([expected_response])]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_v2.BigtableClient() + + # Setup Request + table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + + response = client.sample_row_keys(table_name) + resources = list(response) + assert len(resources) == 1 + assert expected_response == resources[0] + + assert len(channel.requests) == 1 + expected_request = bigtable_pb2.SampleRowKeysRequest(table_name=table_name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_sample_row_keys_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_v2.BigtableClient() + + # Setup request + table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + + with 
pytest.raises(CustomException): + client.sample_row_keys(table_name) + + def test_mutate_row(self): + # Setup Expected Response + expected_response = {} + expected_response = bigtable_pb2.MutateRowResponse(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_v2.BigtableClient() + + # Setup Request + table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + row_key = b"122" + mutations = [] + + response = client.mutate_row(table_name, row_key, mutations) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_pb2.MutateRowRequest( + table_name=table_name, row_key=row_key, mutations=mutations + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_mutate_row_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_v2.BigtableClient() + + # Setup request + table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + row_key = b"122" + mutations = [] + + with pytest.raises(CustomException): + client.mutate_row(table_name, row_key, mutations) + + def test_mutate_rows(self): + # Setup Expected Response + expected_response = {} + expected_response = bigtable_pb2.MutateRowsResponse(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[iter([expected_response])]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_v2.BigtableClient() + + # Setup Request + table_name = client.table_path("[PROJECT]", "[INSTANCE]", 
"[TABLE]") + entries = [] + + response = client.mutate_rows(table_name, entries) + resources = list(response) + assert len(resources) == 1 + assert expected_response == resources[0] + + assert len(channel.requests) == 1 + expected_request = bigtable_pb2.MutateRowsRequest( + table_name=table_name, entries=entries + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_mutate_rows_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_v2.BigtableClient() + + # Setup request + table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + entries = [] + + with pytest.raises(CustomException): + client.mutate_rows(table_name, entries) + + def test_check_and_mutate_row(self): + # Setup Expected Response + predicate_matched = True + expected_response = {"predicate_matched": predicate_matched} + expected_response = bigtable_pb2.CheckAndMutateRowResponse(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_v2.BigtableClient() + + # Setup Request + table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + row_key = b"122" + + response = client.check_and_mutate_row(table_name, row_key) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_pb2.CheckAndMutateRowRequest( + table_name=table_name, row_key=row_key + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_check_and_mutate_row_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = 
mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_v2.BigtableClient() + + # Setup request + table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + row_key = b"122" + + with pytest.raises(CustomException): + client.check_and_mutate_row(table_name, row_key) + + def test_read_modify_write_row(self): + # Setup Expected Response + expected_response = {} + expected_response = bigtable_pb2.ReadModifyWriteRowResponse(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_v2.BigtableClient() + + # Setup Request + table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + row_key = b"122" + rules = [] + + response = client.read_modify_write_row(table_name, row_key, rules) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_pb2.ReadModifyWriteRowRequest( + table_name=table_name, row_key=row_key, rules=rules + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_read_modify_write_row_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_v2.BigtableClient() + + # Setup request + table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + row_key = b"122" + rules = [] + + with pytest.raises(CustomException): + client.read_modify_write_row(table_name, row_key, rules) diff --git a/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py b/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py new file mode 100644 index 
000000000..427e05c9b --- /dev/null +++ b/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py @@ -0,0 +1,926 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Unit tests.""" + +import mock +import pytest + +from google.rpc import status_pb2 + +from google.cloud import bigtable_admin_v2 +from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2 +from google.cloud.bigtable_admin_v2.proto import instance_pb2 +from google.iam.v1 import iam_policy_pb2 +from google.iam.v1 import policy_pb2 +from google.longrunning import operations_pb2 +from google.protobuf import empty_pb2 +from google.protobuf import field_mask_pb2 + + +class MultiCallableStub(object): + """Stub for the grpc.UnaryUnaryMultiCallable interface.""" + + def __init__(self, method, channel_stub): + self.method = method + self.channel_stub = channel_stub + + def __call__(self, request, timeout=None, metadata=None, credentials=None): + self.channel_stub.requests.append((self.method, request)) + + response = None + if self.channel_stub.responses: + response = self.channel_stub.responses.pop() + + if isinstance(response, Exception): + raise response + + if response: + return response + + +class ChannelStub(object): + """Stub for the grpc.Channel interface.""" + + def __init__(self, responses=[]): + self.responses = responses + self.requests = [] + + def unary_unary(self, method, request_serializer=None, 
response_deserializer=None): + return MultiCallableStub(method, self) + + +class CustomException(Exception): + pass + + +class TestBigtableInstanceAdminClient(object): + def test_create_instance(self): + # Setup Expected Response + name = "name3373707" + display_name = "displayName1615086568" + expected_response = {"name": name, "display_name": display_name} + expected_response = instance_pb2.Instance(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_create_instance", done=True + ) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + parent = client.project_path("[PROJECT]") + instance_id = "instanceId-2101995259" + instance = {} + clusters = {} + + response = client.create_instance(parent, instance_id, instance, clusters) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.CreateInstanceRequest( + parent=parent, instance_id=instance_id, instance=instance, clusters=clusters + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_create_instance_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_create_instance_exception", done=True + ) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + parent = client.project_path("[PROJECT]") + instance_id = 
"instanceId-2101995259" + instance = {} + clusters = {} + + response = client.create_instance(parent, instance_id, instance, clusters) + exception = response.exception() + assert exception.errors[0] == error + + def test_get_instance(self): + # Setup Expected Response + name_2 = "name2-1052831874" + display_name = "displayName1615086568" + expected_response = {"name": name_2, "display_name": display_name} + expected_response = instance_pb2.Instance(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + name = client.instance_path("[PROJECT]", "[INSTANCE]") + + response = client.get_instance(name) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.GetInstanceRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_get_instance_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup request + name = client.instance_path("[PROJECT]", "[INSTANCE]") + + with pytest.raises(CustomException): + client.get_instance(name) + + def test_list_instances(self): + # Setup Expected Response + next_page_token = "nextPageToken-1530815211" + expected_response = {"next_page_token": next_page_token} + expected_response = bigtable_instance_admin_pb2.ListInstancesResponse( + **expected_response + ) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = 
mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + parent = client.project_path("[PROJECT]") + + response = client.list_instances(parent) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.ListInstancesRequest( + parent=parent + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_list_instances_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup request + parent = client.project_path("[PROJECT]") + + with pytest.raises(CustomException): + client.list_instances(parent) + + def test_update_instance(self): + # Setup Expected Response + name = "name3373707" + display_name_2 = "displayName21615000987" + expected_response = {"name": name, "display_name": display_name_2} + expected_response = instance_pb2.Instance(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + display_name = "displayName1615086568" + + response = client.update_instance(display_name) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = instance_pb2.Instance(display_name=display_name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_update_instance_exception(self): + # Mock the API response + channel = 
ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup request + display_name = "displayName1615086568" + + with pytest.raises(CustomException): + client.update_instance(display_name) + + def test_partial_update_instance(self): + # Setup Expected Response + name = "name3373707" + display_name = "displayName1615086568" + expected_response = {"name": name, "display_name": display_name} + expected_response = instance_pb2.Instance(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_partial_update_instance", done=True + ) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + instance = {} + update_mask = {} + + response = client.partial_update_instance(instance, update_mask) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.PartialUpdateInstanceRequest( + instance=instance, update_mask=update_mask + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_partial_update_instance_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_partial_update_instance_exception", done=True + ) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = 
bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + instance = {} + update_mask = {} + + response = client.partial_update_instance(instance, update_mask) + exception = response.exception() + assert exception.errors[0] == error + + def test_delete_instance(self): + channel = ChannelStub() + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + name = client.instance_path("[PROJECT]", "[INSTANCE]") + + client.delete_instance(name) + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.DeleteInstanceRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_delete_instance_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup request + name = client.instance_path("[PROJECT]", "[INSTANCE]") + + with pytest.raises(CustomException): + client.delete_instance(name) + + def test_create_cluster(self): + # Setup Expected Response + name = "name3373707" + location = "location1901043637" + serve_nodes = 1288838783 + expected_response = { + "name": name, + "location": location, + "serve_nodes": serve_nodes, + } + expected_response = instance_pb2.Cluster(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_create_cluster", done=True + ) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = 
bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + cluster_id = "clusterId240280960" + cluster = {} + + response = client.create_cluster(parent, cluster_id, cluster) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.CreateClusterRequest( + parent=parent, cluster_id=cluster_id, cluster=cluster + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_create_cluster_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_create_cluster_exception", done=True + ) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + cluster_id = "clusterId240280960" + cluster = {} + + response = client.create_cluster(parent, cluster_id, cluster) + exception = response.exception() + assert exception.errors[0] == error + + def test_get_cluster(self): + # Setup Expected Response + name_2 = "name2-1052831874" + location = "location1901043637" + serve_nodes = 1288838783 + expected_response = { + "name": name_2, + "location": location, + "serve_nodes": serve_nodes, + } + expected_response = instance_pb2.Cluster(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + name = client.cluster_path("[PROJECT]", 
"[INSTANCE]", "[CLUSTER]") + + response = client.get_cluster(name) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.GetClusterRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_get_cluster_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup request + name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") + + with pytest.raises(CustomException): + client.get_cluster(name) + + def test_list_clusters(self): + # Setup Expected Response + next_page_token = "nextPageToken-1530815211" + expected_response = {"next_page_token": next_page_token} + expected_response = bigtable_instance_admin_pb2.ListClustersResponse( + **expected_response + ) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + + response = client.list_clusters(parent) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.ListClustersRequest( + parent=parent + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_list_clusters_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = 
bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + + with pytest.raises(CustomException): + client.list_clusters(parent) + + def test_update_cluster(self): + # Setup Expected Response + name = "name3373707" + location = "location1901043637" + serve_nodes_2 = 1623486220 + expected_response = { + "name": name, + "location": location, + "serve_nodes": serve_nodes_2, + } + expected_response = instance_pb2.Cluster(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_update_cluster", done=True + ) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + serve_nodes = 1288838783 + + response = client.update_cluster(serve_nodes) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = instance_pb2.Cluster(serve_nodes=serve_nodes) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_update_cluster_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_update_cluster_exception", done=True + ) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + serve_nodes = 1288838783 + + response = client.update_cluster(serve_nodes) + exception = response.exception() + assert exception.errors[0] == error + + def test_delete_cluster(self): + 
channel = ChannelStub() + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") + + client.delete_cluster(name) + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.DeleteClusterRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_delete_cluster_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup request + name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") + + with pytest.raises(CustomException): + client.delete_cluster(name) + + def test_create_app_profile(self): + # Setup Expected Response + name = "name3373707" + etag = "etag3123477" + description = "description-1724546052" + expected_response = {"name": name, "etag": etag, "description": description} + expected_response = instance_pb2.AppProfile(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + app_profile_id = "appProfileId1262094415" + app_profile = {} + + response = client.create_app_profile(parent, app_profile_id, app_profile) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.CreateAppProfileRequest( + 
parent=parent, app_profile_id=app_profile_id, app_profile=app_profile + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_create_app_profile_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + app_profile_id = "appProfileId1262094415" + app_profile = {} + + with pytest.raises(CustomException): + client.create_app_profile(parent, app_profile_id, app_profile) + + def test_get_app_profile(self): + # Setup Expected Response + name_2 = "name2-1052831874" + etag = "etag3123477" + description = "description-1724546052" + expected_response = {"name": name_2, "etag": etag, "description": description} + expected_response = instance_pb2.AppProfile(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + name = client.app_profile_path("[PROJECT]", "[INSTANCE]", "[APP_PROFILE]") + + response = client.get_app_profile(name) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.GetAppProfileRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_get_app_profile_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = 
bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup request + name = client.app_profile_path("[PROJECT]", "[INSTANCE]", "[APP_PROFILE]") + + with pytest.raises(CustomException): + client.get_app_profile(name) + + def test_list_app_profiles(self): + # Setup Expected Response + next_page_token = "" + app_profiles_element = {} + app_profiles = [app_profiles_element] + expected_response = { + "next_page_token": next_page_token, + "app_profiles": app_profiles, + } + expected_response = bigtable_instance_admin_pb2.ListAppProfilesResponse( + **expected_response + ) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + + paged_list_response = client.list_app_profiles(parent) + resources = list(paged_list_response) + assert len(resources) == 1 + + assert expected_response.app_profiles[0] == resources[0] + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.ListAppProfilesRequest( + parent=parent + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_list_app_profiles_exception(self): + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + + paged_list_response = client.list_app_profiles(parent) + with pytest.raises(CustomException): + list(paged_list_response) + + def test_update_app_profile(self): + # Setup Expected Response + name = "name3373707" + etag = "etag3123477" + description = 
"description-1724546052" + expected_response = {"name": name, "etag": etag, "description": description} + expected_response = instance_pb2.AppProfile(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_update_app_profile", done=True + ) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + app_profile = {} + update_mask = {} + + response = client.update_app_profile(app_profile, update_mask) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.UpdateAppProfileRequest( + app_profile=app_profile, update_mask=update_mask + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_update_app_profile_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_update_app_profile_exception", done=True + ) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + app_profile = {} + update_mask = {} + + response = client.update_app_profile(app_profile, update_mask) + exception = response.exception() + assert exception.errors[0] == error + + def test_delete_app_profile(self): + channel = ChannelStub() + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = 
bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + name = client.app_profile_path("[PROJECT]", "[INSTANCE]", "[APP_PROFILE]") + ignore_warnings = True + + client.delete_app_profile(name, ignore_warnings) + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.DeleteAppProfileRequest( + name=name, ignore_warnings=ignore_warnings + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_delete_app_profile_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup request + name = client.app_profile_path("[PROJECT]", "[INSTANCE]", "[APP_PROFILE]") + ignore_warnings = True + + with pytest.raises(CustomException): + client.delete_app_profile(name, ignore_warnings) + + def test_get_iam_policy(self): + # Setup Expected Response + version = 351608024 + etag = b"21" + expected_response = {"version": version, "etag": etag} + expected_response = policy_pb2.Policy(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + resource = "resource-341064690" + + response = client.get_iam_policy(resource) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = iam_policy_pb2.GetIamPolicyRequest(resource=resource) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_get_iam_policy_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = 
mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup request + resource = "resource-341064690" + + with pytest.raises(CustomException): + client.get_iam_policy(resource) + + def test_set_iam_policy(self): + # Setup Expected Response + version = 351608024 + etag = b"21" + expected_response = {"version": version, "etag": etag} + expected_response = policy_pb2.Policy(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + resource = "resource-341064690" + policy = {} + + response = client.set_iam_policy(resource, policy) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = iam_policy_pb2.SetIamPolicyRequest( + resource=resource, policy=policy + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_set_iam_policy_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup request + resource = "resource-341064690" + policy = {} + + with pytest.raises(CustomException): + client.set_iam_policy(resource, policy) + + def test_test_iam_permissions(self): + # Setup Expected Response + expected_response = {} + expected_response = iam_policy_pb2.TestIamPermissionsResponse( + **expected_response + ) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = 
mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + resource = "resource-341064690" + permissions = [] + + response = client.test_iam_permissions(resource, permissions) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = iam_policy_pb2.TestIamPermissionsRequest( + resource=resource, permissions=permissions + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_test_iam_permissions_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup request + resource = "resource-341064690" + permissions = [] + + with pytest.raises(CustomException): + client.test_iam_permissions(resource, permissions) diff --git a/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py b/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py new file mode 100644 index 000000000..48e67ae22 --- /dev/null +++ b/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py @@ -0,0 +1,1049 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Unit tests.""" + +import mock +import pytest + +from google.rpc import status_pb2 + +from google.cloud import bigtable_admin_v2 +from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2 +from google.cloud.bigtable_admin_v2.proto import table_pb2 +from google.iam.v1 import iam_policy_pb2 +from google.iam.v1 import policy_pb2 +from google.longrunning import operations_pb2 +from google.protobuf import empty_pb2 +from google.protobuf import field_mask_pb2 + + +class MultiCallableStub(object): + """Stub for the grpc.UnaryUnaryMultiCallable interface.""" + + def __init__(self, method, channel_stub): + self.method = method + self.channel_stub = channel_stub + + def __call__(self, request, timeout=None, metadata=None, credentials=None): + self.channel_stub.requests.append((self.method, request)) + + response = None + if self.channel_stub.responses: + response = self.channel_stub.responses.pop() + + if isinstance(response, Exception): + raise response + + if response: + return response + + +class ChannelStub(object): + """Stub for the grpc.Channel interface.""" + + def __init__(self, responses=[]): + self.responses = responses + self.requests = [] + + def unary_unary(self, method, request_serializer=None, response_deserializer=None): + return MultiCallableStub(method, self) + + +class CustomException(Exception): + pass + + +class TestBigtableTableAdminClient(object): + def test_create_table(self): + # Setup Expected Response + name = "name3373707" + expected_response = {"name": name} + expected_response = table_pb2.Table(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + table_id = "tableId-895419604" + table = {} + + response 
= client.create_table(parent, table_id, table) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.CreateTableRequest( + parent=parent, table_id=table_id, table=table + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_create_table_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + table_id = "tableId-895419604" + table = {} + + with pytest.raises(CustomException): + client.create_table(parent, table_id, table) + + def test_create_table_from_snapshot(self): + # Setup Expected Response + name = "name3373707" + expected_response = {"name": name} + expected_response = table_pb2.Table(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_create_table_from_snapshot", done=True + ) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + table_id = "tableId-895419604" + source_snapshot = client.snapshot_path( + "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" + ) + + response = client.create_table_from_snapshot(parent, table_id, source_snapshot) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.CreateTableFromSnapshotRequest( + parent=parent, table_id=table_id, 
source_snapshot=source_snapshot + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_create_table_from_snapshot_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_create_table_from_snapshot_exception", done=True + ) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + table_id = "tableId-895419604" + source_snapshot = client.snapshot_path( + "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" + ) + + response = client.create_table_from_snapshot(parent, table_id, source_snapshot) + exception = response.exception() + assert exception.errors[0] == error + + def test_list_tables(self): + # Setup Expected Response + next_page_token = "" + tables_element = {} + tables = [tables_element] + expected_response = {"next_page_token": next_page_token, "tables": tables} + expected_response = bigtable_table_admin_pb2.ListTablesResponse( + **expected_response + ) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + + paged_list_response = client.list_tables(parent) + resources = list(paged_list_response) + assert len(resources) == 1 + + assert expected_response.tables[0] == resources[0] + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.ListTablesRequest(parent=parent) + actual_request = 
channel.requests[0][1] + assert expected_request == actual_request + + def test_list_tables_exception(self): + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + + paged_list_response = client.list_tables(parent) + with pytest.raises(CustomException): + list(paged_list_response) + + def test_get_table(self): + # Setup Expected Response + name_2 = "name2-1052831874" + expected_response = {"name": name_2} + expected_response = table_pb2.Table(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + + response = client.get_table(name) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.GetTableRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_get_table_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + + with pytest.raises(CustomException): + client.get_table(name) + + def test_delete_table(self): + channel = ChannelStub() + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as 
create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + + client.delete_table(name) + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.DeleteTableRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_delete_table_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + + with pytest.raises(CustomException): + client.delete_table(name) + + def test_modify_column_families(self): + # Setup Expected Response + name_2 = "name2-1052831874" + expected_response = {"name": name_2} + expected_response = table_pb2.Table(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + modifications = [] + + response = client.modify_column_families(name, modifications) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.ModifyColumnFamiliesRequest( + name=name, modifications=modifications + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_modify_column_families_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = 
mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + modifications = [] + + with pytest.raises(CustomException): + client.modify_column_families(name, modifications) + + def test_drop_row_range(self): + channel = ChannelStub() + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + + client.drop_row_range(name) + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.DropRowRangeRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_drop_row_range_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + + with pytest.raises(CustomException): + client.drop_row_range(name) + + def test_generate_consistency_token(self): + # Setup Expected Response + consistency_token = "consistencyToken-1090516718" + expected_response = {"consistency_token": consistency_token} + expected_response = bigtable_table_admin_pb2.GenerateConsistencyTokenResponse( + **expected_response + ) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = 
bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + + response = client.generate_consistency_token(name) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.GenerateConsistencyTokenRequest( + name=name + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_generate_consistency_token_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + + with pytest.raises(CustomException): + client.generate_consistency_token(name) + + def test_check_consistency(self): + # Setup Expected Response + consistent = True + expected_response = {"consistent": consistent} + expected_response = bigtable_table_admin_pb2.CheckConsistencyResponse( + **expected_response + ) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + consistency_token = "consistencyToken-1090516718" + + response = client.check_consistency(name, consistency_token) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.CheckConsistencyRequest( + name=name, consistency_token=consistency_token + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_check_consistency_exception(self): + # 
Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + consistency_token = "consistencyToken-1090516718" + + with pytest.raises(CustomException): + client.check_consistency(name, consistency_token) + + def test_get_iam_policy(self): + # Setup Expected Response + version = 351608024 + etag = b"21" + expected_response = {"version": version, "etag": etag} + expected_response = policy_pb2.Policy(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + resource = "resource-341064690" + + response = client.get_iam_policy(resource) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = iam_policy_pb2.GetIamPolicyRequest(resource=resource) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_get_iam_policy_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + resource = "resource-341064690" + + with pytest.raises(CustomException): + client.get_iam_policy(resource) + + def test_set_iam_policy(self): + # Setup Expected Response + version = 351608024 + etag = b"21" + expected_response = {"version": version, "etag": etag} + expected_response = 
policy_pb2.Policy(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + resource = "resource-341064690" + policy = {} + + response = client.set_iam_policy(resource, policy) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = iam_policy_pb2.SetIamPolicyRequest( + resource=resource, policy=policy + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_set_iam_policy_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + resource = "resource-341064690" + policy = {} + + with pytest.raises(CustomException): + client.set_iam_policy(resource, policy) + + def test_test_iam_permissions(self): + # Setup Expected Response + expected_response = {} + expected_response = iam_policy_pb2.TestIamPermissionsResponse( + **expected_response + ) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + resource = "resource-341064690" + permissions = [] + + response = client.test_iam_permissions(resource, permissions) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = iam_policy_pb2.TestIamPermissionsRequest( + resource=resource, permissions=permissions + ) + actual_request = 
channel.requests[0][1] + assert expected_request == actual_request + + def test_test_iam_permissions_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + resource = "resource-341064690" + permissions = [] + + with pytest.raises(CustomException): + client.test_iam_permissions(resource, permissions) + + def test_snapshot_table(self): + # Setup Expected Response + name_2 = "name2-1052831874" + data_size_bytes = 2110122398 + description = "description-1724546052" + expected_response = { + "name": name_2, + "data_size_bytes": data_size_bytes, + "description": description, + } + expected_response = table_pb2.Snapshot(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_snapshot_table", done=True + ) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + cluster = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") + snapshot_id = "snapshotId-168585866" + + response = client.snapshot_table(name, cluster, snapshot_id) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.SnapshotTableRequest( + name=name, cluster=cluster, snapshot_id=snapshot_id + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_snapshot_table_exception(self): + # Setup Response + error = status_pb2.Status() + operation = 
operations_pb2.Operation( + name="operations/test_snapshot_table_exception", done=True + ) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + cluster = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") + snapshot_id = "snapshotId-168585866" + + response = client.snapshot_table(name, cluster, snapshot_id) + exception = response.exception() + assert exception.errors[0] == error + + def test_get_snapshot(self): + # Setup Expected Response + name_2 = "name2-1052831874" + data_size_bytes = 2110122398 + description = "description-1724546052" + expected_response = { + "name": name_2, + "data_size_bytes": data_size_bytes, + "description": description, + } + expected_response = table_pb2.Snapshot(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + name = client.snapshot_path( + "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" + ) + + response = client.get_snapshot(name) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.GetSnapshotRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_get_snapshot_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = 
channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + name = client.snapshot_path( + "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" + ) + + with pytest.raises(CustomException): + client.get_snapshot(name) + + def test_list_snapshots(self): + # Setup Expected Response + next_page_token = "" + snapshots_element = {} + snapshots = [snapshots_element] + expected_response = {"next_page_token": next_page_token, "snapshots": snapshots} + expected_response = bigtable_table_admin_pb2.ListSnapshotsResponse( + **expected_response + ) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") + + paged_list_response = client.list_snapshots(parent) + resources = list(paged_list_response) + assert len(resources) == 1 + + assert expected_response.snapshots[0] == resources[0] + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.ListSnapshotsRequest(parent=parent) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_list_snapshots_exception(self): + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") + + paged_list_response = client.list_snapshots(parent) + with pytest.raises(CustomException): + list(paged_list_response) + + def test_delete_snapshot(self): + channel = ChannelStub() + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + 
create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + name = client.snapshot_path( + "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" + ) + + client.delete_snapshot(name) + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.DeleteSnapshotRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_delete_snapshot_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + name = client.snapshot_path( + "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" + ) + + with pytest.raises(CustomException): + client.delete_snapshot(name) + + def test_create_backup(self): + # Setup Expected Response + name = "name3373707" + source_table = "sourceTable1670858410" + size_bytes = 1796325715 + expected_response = { + "name": name, + "source_table": source_table, + "size_bytes": size_bytes, + } + expected_response = table_pb2.Backup(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_create_backup", done=True + ) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") + backup_id = "backupId1355353272" + backup = {} + + response = client.create_backup(parent, backup_id, backup) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = 
bigtable_table_admin_pb2.CreateBackupRequest( + parent=parent, backup_id=backup_id, backup=backup + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_create_backup_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_create_backup_exception", done=True + ) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") + backup_id = "backupId1355353272" + backup = {} + + response = client.create_backup(parent, backup_id, backup) + exception = response.exception() + assert exception.errors[0] == error + + def test_get_backup(self): + # Setup Expected Response + name_2 = "name2-1052831874" + source_table = "sourceTable1670858410" + size_bytes = 1796325715 + expected_response = { + "name": name_2, + "source_table": source_table, + "size_bytes": size_bytes, + } + expected_response = table_pb2.Backup(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + name = client.backup_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[BACKUP]") + + response = client.get_backup(name) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.GetBackupRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_get_backup_exception(self): + # Mock the 
API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + name = client.backup_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[BACKUP]") + + with pytest.raises(CustomException): + client.get_backup(name) + + def test_list_backups(self): + # Setup Expected Response + next_page_token = "" + backups_element = {} + backups = [backups_element] + expected_response = {"next_page_token": next_page_token, "backups": backups} + expected_response = bigtable_table_admin_pb2.ListBackupsResponse( + **expected_response + ) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") + + paged_list_response = client.list_backups(parent) + resources = list(paged_list_response) + assert len(resources) == 1 + + assert expected_response.backups[0] == resources[0] + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.ListBackupsRequest(parent=parent) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_list_backups_exception(self): + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") + + paged_list_response = client.list_backups(parent) + with pytest.raises(CustomException): + list(paged_list_response) 
+ + def test_update_backup(self): + # Setup Expected Response + name = "name3373707" + source_table = "sourceTable1670858410" + size_bytes = 1796325715 + expected_response = { + "name": name, + "source_table": source_table, + "size_bytes": size_bytes, + } + expected_response = table_pb2.Backup(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + backup = {} + update_mask = {} + + response = client.update_backup(backup, update_mask) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.UpdateBackupRequest( + backup=backup, update_mask=update_mask + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_update_backup_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + backup = {} + update_mask = {} + + with pytest.raises(CustomException): + client.update_backup(backup, update_mask) + + def test_delete_backup(self): + channel = ChannelStub() + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + name = client.backup_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[BACKUP]") + + client.delete_backup(name) + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.DeleteBackupRequest(name=name) + actual_request = channel.requests[0][1] + assert 
expected_request == actual_request + + def test_delete_backup_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + name = client.backup_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[BACKUP]") + + with pytest.raises(CustomException): + client.delete_backup(name) + + def test_restore_table(self): + # Setup Expected Response + name = "name3373707" + expected_response = {"name": name} + expected_response = table_pb2.Table(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_restore_table", done=True + ) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + table_id = "tableId-895419604" + + response = client.restore_table(parent, table_id) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.RestoreTableRequest( + parent=parent, table_id=table_id + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_restore_table_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_restore_table_exception", done=True + ) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + 
create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + table_id = "tableId-895419604" + + response = client.restore_table(parent, table_id) + exception = response.exception() + assert exception.errors[0] == error From b619ebe7f4b4d9aa79cf2df9a6cf02f9abbf277d Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Sat, 13 Feb 2021 06:13:21 -0800 Subject: [PATCH 09/11] feat: migrate bigtable retry/timeout settings to gRPC's service configs Committer: @miraleung PiperOrigin-RevId: 346894665 Source-Author: Google APIs Source-Date: Thu Dec 10 16:55:31 2020 -0800 Source-Repo: googleapis/googleapis Source-Sha: cbbd3170bcf217e36ae72f4ac522449bf861346f Source-Link: https://github.com/googleapis/googleapis/commit/cbbd3170bcf217e36ae72f4ac522449bf861346f --- .../gapic/bigtable_instance_admin_client.py | 722 +++++----- .../bigtable_instance_admin_client_config.py | 104 +- .../gapic/bigtable_table_admin_client.py | 1230 ++++++++--------- .../bigtable_table_admin_client_config.py | 135 +- .../bigtable_instance_admin_grpc_transport.py | 104 +- .../bigtable_table_admin_grpc_transport.py | 204 +-- .../gapic/bigtable_client_config.py | 59 +- synth.metadata | 4 +- .../test_bigtable_instance_admin_client_v2.py | 430 +++--- .../v2/test_bigtable_table_admin_client_v2.py | 656 ++++----- 10 files changed, 1788 insertions(+), 1860 deletions(-) diff --git a/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py b/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py index bdb4e2f28..202f2cacf 100644 --- a/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py +++ b/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py @@ -364,28 +364,50 @@ def create_instance( metadata_type=bigtable_instance_admin_pb2.CreateInstanceMetadata, ) - def get_instance( + def partial_update_instance( self, - name, + instance, + 
update_mask, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Gets information about an instance. + Partially updates an instance within a project. This method can modify all + fields of an Instance and is the preferred way to update an Instance. Example: >>> from google.cloud import bigtable_admin_v2 >>> >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() >>> - >>> name = client.instance_path('[PROJECT]', '[INSTANCE]') + >>> # TODO: Initialize `instance`: + >>> instance = {} >>> - >>> response = client.get_instance(name) + >>> # TODO: Initialize `update_mask`: + >>> update_mask = {} + >>> + >>> response = client.partial_update_instance(instance, update_mask) + >>> + >>> def callback(operation_future): + ... # Handle result. + ... result = operation_future.result() + >>> + >>> response.add_done_callback(callback) + >>> + >>> # Handle metadata. + >>> metadata = response.metadata() Args: - name (str): Required. The unique name of the requested instance. Values are of - the form ``projects/{project}/instances/{instance}``. + instance (Union[dict, ~google.cloud.bigtable_admin_v2.types.Instance]): Required. The Instance which will (partially) replace the current value. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_admin_v2.types.Instance` + update_mask (Union[dict, ~google.cloud.bigtable_admin_v2.types.FieldMask]): Required. The subset of Instance fields which should be replaced. + Must be explicitly set. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_admin_v2.types.FieldMask` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -396,7 +418,7 @@ def get_instance( that is provided to the method. 
Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Instance` instance. + A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -406,22 +428,24 @@ def get_instance( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if "get_instance" not in self._inner_api_calls: + if "partial_update_instance" not in self._inner_api_calls: self._inner_api_calls[ - "get_instance" + "partial_update_instance" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_instance, - default_retry=self._method_configs["GetInstance"].retry, - default_timeout=self._method_configs["GetInstance"].timeout, + self.transport.partial_update_instance, + default_retry=self._method_configs["PartialUpdateInstance"].retry, + default_timeout=self._method_configs["PartialUpdateInstance"].timeout, client_info=self._client_info, ) - request = bigtable_instance_admin_pb2.GetInstanceRequest(name=name,) + request = bigtable_instance_admin_pb2.PartialUpdateInstanceRequest( + instance=instance, update_mask=update_mask, + ) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("name", name)] + routing_header = [("instance.name", instance.name)] except AttributeError: pass else: @@ -430,34 +454,64 @@ def get_instance( ) metadata.append(routing_metadata) - return self._inner_api_calls["get_instance"]( + operation = self._inner_api_calls["partial_update_instance"]( request, retry=retry, timeout=timeout, metadata=metadata ) + return google.api_core.operation.from_gapic( + operation, + self.transport._operations_client, + instance_pb2.Instance, + metadata_type=bigtable_instance_admin_pb2.UpdateInstanceMetadata, + ) - def list_instances( + def create_cluster( self, parent, - page_token=None, + cluster_id, + cluster, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, 
metadata=None, ): """ - Lists information about instances in a project. + Creates a cluster within an instance. Example: >>> from google.cloud import bigtable_admin_v2 >>> >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() >>> - >>> parent = client.project_path('[PROJECT]') + >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') >>> - >>> response = client.list_instances(parent) + >>> # TODO: Initialize `cluster_id`: + >>> cluster_id = '' + >>> + >>> # TODO: Initialize `cluster`: + >>> cluster = {} + >>> + >>> response = client.create_cluster(parent, cluster_id, cluster) + >>> + >>> def callback(operation_future): + ... # Handle result. + ... result = operation_future.result() + >>> + >>> response.add_done_callback(callback) + >>> + >>> # Handle metadata. + >>> metadata = response.metadata() Args: - parent (str): Required. The unique name of the project for which a list of - instances is requested. Values are of the form ``projects/{project}``. - page_token (str): DEPRECATED: This field is unused and ignored. + parent (str): Required. The unique name of the instance in which to create the new + cluster. Values are of the form + ``projects/{project}/instances/{instance}``. + cluster_id (str): Required. The ID to be used when referring to the new cluster within + its instance, e.g., just ``mycluster`` rather than + ``projects/myproject/instances/myinstance/clusters/mycluster``. + cluster (Union[dict, ~google.cloud.bigtable_admin_v2.types.Cluster]): Required. The cluster to be created. Fields marked ``OutputOnly`` + must be left blank. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_admin_v2.types.Cluster` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -468,7 +522,7 @@ def list_instances( that is provided to the method. 
Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.ListInstancesResponse` instance. + A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -478,18 +532,18 @@ def list_instances( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if "list_instances" not in self._inner_api_calls: + if "create_cluster" not in self._inner_api_calls: self._inner_api_calls[ - "list_instances" + "create_cluster" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_instances, - default_retry=self._method_configs["ListInstances"].retry, - default_timeout=self._method_configs["ListInstances"].timeout, + self.transport.create_cluster, + default_retry=self._method_configs["CreateCluster"].retry, + default_timeout=self._method_configs["CreateCluster"].timeout, client_info=self._client_info, ) - request = bigtable_instance_admin_pb2.ListInstancesRequest( - parent=parent, page_token=page_token, + request = bigtable_instance_admin_pb2.CreateClusterRequest( + parent=parent, cluster_id=cluster_id, cluster=cluster, ) if metadata is None: metadata = [] @@ -504,56 +558,61 @@ def list_instances( ) metadata.append(routing_metadata) - return self._inner_api_calls["list_instances"]( + operation = self._inner_api_calls["create_cluster"]( request, retry=retry, timeout=timeout, metadata=metadata ) + return google.api_core.operation.from_gapic( + operation, + self.transport._operations_client, + instance_pb2.Cluster, + metadata_type=bigtable_instance_admin_pb2.CreateClusterMetadata, + ) - def update_instance( + def update_cluster( self, - display_name, + serve_nodes, name=None, + location=None, state=None, - type_=None, - labels=None, + default_storage_type=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Updates an instance within a project. 
This method updates only the display - name and type for an Instance. To update other Instance properties, such as - labels, use PartialUpdateInstance. + Updates a cluster within an instance. Example: >>> from google.cloud import bigtable_admin_v2 >>> >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() >>> - >>> # TODO: Initialize `display_name`: - >>> display_name = '' + >>> # TODO: Initialize `serve_nodes`: + >>> serve_nodes = 0 >>> - >>> response = client.update_instance(display_name) + >>> response = client.update_cluster(serve_nodes) + >>> + >>> def callback(operation_future): + ... # Handle result. + ... result = operation_future.result() + >>> + >>> response.add_done_callback(callback) + >>> + >>> # Handle metadata. + >>> metadata = response.metadata() Args: - display_name (str): Required. The descriptive name for this instance as it appears in UIs. - Can be changed at any time, but should be kept globally unique - to avoid confusion. - name (str): The unique name of the instance. Values are of the form - ``projects/{project}/instances/[a-z][a-z0-9\\-]+[a-z0-9]``. - state (~google.cloud.bigtable_admin_v2.types.State): (``OutputOnly``) The current state of the instance. - type_ (~google.cloud.bigtable_admin_v2.types.Type): The type of the instance. Defaults to ``PRODUCTION``. - labels (dict[str -> str]): Labels are a flexible and lightweight mechanism for organizing cloud - resources into groups that reflect a customer's organizational needs and - deployment strategies. They can be used to filter resources and - aggregate metrics. - - - Label keys must be between 1 and 63 characters long and must conform - to the regular expression: - ``[\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}``. - - Label values must be between 0 and 63 characters long and must - conform to the regular expression: ``[\p{Ll}\p{Lo}\p{N}_-]{0,63}``. - - No more than 64 labels can be associated with a given resource. - - Keys and values must both be under 128 bytes. 
+ serve_nodes (int): Required. The number of nodes allocated to this cluster. More nodes enable + higher throughput and more consistent performance. + name (str): The unique name of the cluster. Values are of the form + ``projects/{project}/instances/{instance}/clusters/[a-z][-a-z0-9]*``. + location (str): (``CreationOnly``) The location where this cluster's nodes and + storage reside. For best performance, clients should be located as close + as possible to this cluster. Currently only zones are supported, so + values should be of the form ``projects/{project}/locations/{zone}``. + state (~google.cloud.bigtable_admin_v2.types.State): The current state of the cluster. + default_storage_type (~google.cloud.bigtable_admin_v2.types.StorageType): (``CreationOnly``) The type of storage used by this cluster to serve + its parent instance's tables, unless explicitly overridden. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -564,7 +623,7 @@ def update_instance( that is provided to the method. Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Instance` instance. + A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -574,22 +633,22 @@ def update_instance( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
- if "update_instance" not in self._inner_api_calls: + if "update_cluster" not in self._inner_api_calls: self._inner_api_calls[ - "update_instance" + "update_cluster" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_instance, - default_retry=self._method_configs["UpdateInstance"].retry, - default_timeout=self._method_configs["UpdateInstance"].timeout, + self.transport.update_cluster, + default_retry=self._method_configs["UpdateCluster"].retry, + default_timeout=self._method_configs["UpdateCluster"].timeout, client_info=self._client_info, ) - request = instance_pb2.Instance( - display_name=display_name, + request = instance_pb2.Cluster( + serve_nodes=serve_nodes, name=name, + location=location, state=state, - type=type_, - labels=labels, + default_storage_type=default_storage_type, ) if metadata is None: metadata = [] @@ -604,34 +663,40 @@ def update_instance( ) metadata.append(routing_metadata) - return self._inner_api_calls["update_instance"]( + operation = self._inner_api_calls["update_cluster"]( request, retry=retry, timeout=timeout, metadata=metadata ) + return google.api_core.operation.from_gapic( + operation, + self.transport._operations_client, + instance_pb2.Cluster, + metadata_type=bigtable_instance_admin_pb2.UpdateClusterMetadata, + ) - def partial_update_instance( + def update_app_profile( self, - instance, + app_profile, update_mask, + ignore_warnings=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Partially updates an instance within a project. This method can modify all - fields of an Instance and is the preferred way to update an Instance. + Updates an app profile within an instance. 
Example: >>> from google.cloud import bigtable_admin_v2 >>> >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() >>> - >>> # TODO: Initialize `instance`: - >>> instance = {} + >>> # TODO: Initialize `app_profile`: + >>> app_profile = {} >>> >>> # TODO: Initialize `update_mask`: >>> update_mask = {} >>> - >>> response = client.partial_update_instance(instance, update_mask) + >>> response = client.update_app_profile(app_profile, update_mask) >>> >>> def callback(operation_future): ... # Handle result. @@ -643,15 +708,16 @@ def partial_update_instance( >>> metadata = response.metadata() Args: - instance (Union[dict, ~google.cloud.bigtable_admin_v2.types.Instance]): Required. The Instance which will (partially) replace the current value. + app_profile (Union[dict, ~google.cloud.bigtable_admin_v2.types.AppProfile]): Required. The app profile which will (partially) replace the current value. If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Instance` - update_mask (Union[dict, ~google.cloud.bigtable_admin_v2.types.FieldMask]): Required. The subset of Instance fields which should be replaced. - Must be explicitly set. + message :class:`~google.cloud.bigtable_admin_v2.types.AppProfile` + update_mask (Union[dict, ~google.cloud.bigtable_admin_v2.types.FieldMask]): Required. The subset of app profile fields which should be replaced. + If unset, all fields will be replaced. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_admin_v2.types.FieldMask` + ignore_warnings (bool): If true, ignore safety checks when updating the app profile. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -672,24 +738,26 @@ def partial_update_instance( ValueError: If the parameters are invalid. 
""" # Wrap the transport method to add retry and timeout logic. - if "partial_update_instance" not in self._inner_api_calls: + if "update_app_profile" not in self._inner_api_calls: self._inner_api_calls[ - "partial_update_instance" + "update_app_profile" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.partial_update_instance, - default_retry=self._method_configs["PartialUpdateInstance"].retry, - default_timeout=self._method_configs["PartialUpdateInstance"].timeout, + self.transport.update_app_profile, + default_retry=self._method_configs["UpdateAppProfile"].retry, + default_timeout=self._method_configs["UpdateAppProfile"].timeout, client_info=self._client_info, ) - request = bigtable_instance_admin_pb2.PartialUpdateInstanceRequest( - instance=instance, update_mask=update_mask, + request = bigtable_instance_admin_pb2.UpdateAppProfileRequest( + app_profile=app_profile, + update_mask=update_mask, + ignore_warnings=ignore_warnings, ) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("instance.name", instance.name)] + routing_header = [("app_profile.name", app_profile.name)] except AttributeError: pass else: @@ -698,17 +766,17 @@ def partial_update_instance( ) metadata.append(routing_metadata) - operation = self._inner_api_calls["partial_update_instance"]( + operation = self._inner_api_calls["update_app_profile"]( request, retry=retry, timeout=timeout, metadata=metadata ) return google.api_core.operation.from_gapic( operation, self.transport._operations_client, - instance_pb2.Instance, - metadata_type=bigtable_instance_admin_pb2.UpdateInstanceMetadata, + instance_pb2.AppProfile, + metadata_type=bigtable_instance_admin_pb2.UpdateAppProfileMetadata, ) - def delete_instance( + def get_instance( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, @@ -716,7 +784,7 @@ def delete_instance( metadata=None, ): """ - Delete an instance from a project. + Gets information about an instance. 
Example: >>> from google.cloud import bigtable_admin_v2 @@ -725,11 +793,11 @@ def delete_instance( >>> >>> name = client.instance_path('[PROJECT]', '[INSTANCE]') >>> - >>> client.delete_instance(name) + >>> response = client.get_instance(name) Args: - name (str): Required. The unique name of the instance to be deleted. Values are - of the form ``projects/{project}/instances/{instance}``. + name (str): Required. The unique name of the requested instance. Values are of + the form ``projects/{project}/instances/{instance}``. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -739,6 +807,9 @@ def delete_instance( metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types.Instance` instance. + Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. @@ -747,17 +818,17 @@ def delete_instance( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
- if "delete_instance" not in self._inner_api_calls: + if "get_instance" not in self._inner_api_calls: self._inner_api_calls[ - "delete_instance" + "get_instance" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_instance, - default_retry=self._method_configs["DeleteInstance"].retry, - default_timeout=self._method_configs["DeleteInstance"].timeout, + self.transport.get_instance, + default_retry=self._method_configs["GetInstance"].retry, + default_timeout=self._method_configs["GetInstance"].timeout, client_info=self._client_info, ) - request = bigtable_instance_admin_pb2.DeleteInstanceRequest(name=name,) + request = bigtable_instance_admin_pb2.GetInstanceRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) @@ -771,58 +842,206 @@ def delete_instance( ) metadata.append(routing_metadata) - self._inner_api_calls["delete_instance"]( + return self._inner_api_calls["get_instance"]( request, retry=retry, timeout=timeout, metadata=metadata ) - def create_cluster( + def list_instances( self, parent, - cluster_id, - cluster, + page_token=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Creates a cluster within an instance. + Lists information about instances in a project. Example: >>> from google.cloud import bigtable_admin_v2 >>> >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() >>> - >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') + >>> parent = client.project_path('[PROJECT]') >>> - >>> # TODO: Initialize `cluster_id`: - >>> cluster_id = '' + >>> response = client.list_instances(parent) + + Args: + parent (str): Required. The unique name of the project for which a list of + instances is requested. Values are of the form ``projects/{project}``. + page_token (str): DEPRECATED: This field is unused and ignored. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. 
If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types.ListInstancesResponse` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. + if "list_instances" not in self._inner_api_calls: + self._inner_api_calls[ + "list_instances" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.list_instances, + default_retry=self._method_configs["ListInstances"].retry, + default_timeout=self._method_configs["ListInstances"].timeout, + client_info=self._client_info, + ) + + request = bigtable_instance_admin_pb2.ListInstancesRequest( + parent=parent, page_token=page_token, + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("parent", parent)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + return self._inner_api_calls["list_instances"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def update_instance( + self, + display_name, + name=None, + state=None, + type_=None, + labels=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Updates an instance within a project. 
This method updates only the display + name and type for an Instance. To update other Instance properties, such as + labels, use PartialUpdateInstance. + + Example: + >>> from google.cloud import bigtable_admin_v2 >>> - >>> # TODO: Initialize `cluster`: - >>> cluster = {} + >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() >>> - >>> response = client.create_cluster(parent, cluster_id, cluster) + >>> # TODO: Initialize `display_name`: + >>> display_name = '' >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() + >>> response = client.update_instance(display_name) + + Args: + display_name (str): Required. The descriptive name for this instance as it appears in UIs. + Can be changed at any time, but should be kept globally unique + to avoid confusion. + name (str): The unique name of the instance. Values are of the form + ``projects/{project}/instances/[a-z][a-z0-9\\-]+[a-z0-9]``. + state (~google.cloud.bigtable_admin_v2.types.State): (``OutputOnly``) The current state of the instance. + type_ (~google.cloud.bigtable_admin_v2.types.Type): The type of the instance. Defaults to ``PRODUCTION``. + labels (dict[str -> str]): Labels are a flexible and lightweight mechanism for organizing cloud + resources into groups that reflect a customer's organizational needs and + deployment strategies. They can be used to filter resources and + aggregate metrics. + + - Label keys must be between 1 and 63 characters long and must conform + to the regular expression: + ``[\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}``. + - Label values must be between 0 and 63 characters long and must + conform to the regular expression: ``[\p{Ll}\p{Lo}\p{N}_-]{0,63}``. + - No more than 64 labels can be associated with a given resource. + - Keys and values must both be under 128 bytes. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. 
If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types.Instance` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. + if "update_instance" not in self._inner_api_calls: + self._inner_api_calls[ + "update_instance" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.update_instance, + default_retry=self._method_configs["UpdateInstance"].retry, + default_timeout=self._method_configs["UpdateInstance"].timeout, + client_info=self._client_info, + ) + + request = instance_pb2.Instance( + display_name=display_name, + name=name, + state=state, + type=type_, + labels=labels, + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("name", name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + return self._inner_api_calls["update_instance"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def delete_instance( + self, + name, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Delete an instance from a project. 
+ + Example: + >>> from google.cloud import bigtable_admin_v2 >>> - >>> response.add_done_callback(callback) + >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() >>> - >>> # Handle metadata. - >>> metadata = response.metadata() + >>> name = client.instance_path('[PROJECT]', '[INSTANCE]') + >>> + >>> client.delete_instance(name) Args: - parent (str): Required. The unique name of the instance in which to create the new - cluster. Values are of the form - ``projects/{project}/instances/{instance}``. - cluster_id (str): Required. The ID to be used when referring to the new cluster within - its instance, e.g., just ``mycluster`` rather than - ``projects/myproject/instances/myinstance/clusters/mycluster``. - cluster (Union[dict, ~google.cloud.bigtable_admin_v2.types.Cluster]): Required. The cluster to be created. Fields marked ``OutputOnly`` - must be left blank. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Cluster` + name (str): Required. The unique name of the instance to be deleted. Values are + of the form ``projects/{project}/instances/{instance}``. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -832,9 +1051,6 @@ def create_cluster( metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. - Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. @@ -843,24 +1059,22 @@ def create_cluster( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
- if "create_cluster" not in self._inner_api_calls: + if "delete_instance" not in self._inner_api_calls: self._inner_api_calls[ - "create_cluster" + "delete_instance" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_cluster, - default_retry=self._method_configs["CreateCluster"].retry, - default_timeout=self._method_configs["CreateCluster"].timeout, + self.transport.delete_instance, + default_retry=self._method_configs["DeleteInstance"].retry, + default_timeout=self._method_configs["DeleteInstance"].timeout, client_info=self._client_info, ) - request = bigtable_instance_admin_pb2.CreateClusterRequest( - parent=parent, cluster_id=cluster_id, cluster=cluster, - ) + request = bigtable_instance_admin_pb2.DeleteInstanceRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("parent", parent)] + routing_header = [("name", name)] except AttributeError: pass else: @@ -869,15 +1083,9 @@ def create_cluster( ) metadata.append(routing_metadata) - operation = self._inner_api_calls["create_cluster"]( + self._inner_api_calls["delete_instance"]( request, retry=retry, timeout=timeout, metadata=metadata ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - instance_pb2.Cluster, - metadata_type=bigtable_instance_admin_pb2.CreateClusterMetadata, - ) def get_cluster( self, @@ -1026,111 +1234,6 @@ def list_clusters( request, retry=retry, timeout=timeout, metadata=metadata ) - def update_cluster( - self, - serve_nodes, - name=None, - location=None, - state=None, - default_storage_type=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates a cluster within an instance. 
- - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> # TODO: Initialize `serve_nodes`: - >>> serve_nodes = 0 - >>> - >>> response = client.update_cluster(serve_nodes) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - serve_nodes (int): Required. The number of nodes allocated to this cluster. More nodes enable - higher throughput and more consistent performance. - name (str): The unique name of the cluster. Values are of the form - ``projects/{project}/instances/{instance}/clusters/[a-z][-a-z0-9]*``. - location (str): (``CreationOnly``) The location where this cluster's nodes and - storage reside. For best performance, clients should be located as close - as possible to this cluster. Currently only zones are supported, so - values should be of the form ``projects/{project}/locations/{zone}``. - state (~google.cloud.bigtable_admin_v2.types.State): The current state of the cluster. - default_storage_type (~google.cloud.bigtable_admin_v2.types.StorageType): (``CreationOnly``) The type of storage used by this cluster to serve - its parent instance's tables, unless explicitly overridden. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. 
- - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "update_cluster" not in self._inner_api_calls: - self._inner_api_calls[ - "update_cluster" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_cluster, - default_retry=self._method_configs["UpdateCluster"].retry, - default_timeout=self._method_configs["UpdateCluster"].timeout, - client_info=self._client_info, - ) - - request = instance_pb2.Cluster( - serve_nodes=serve_nodes, - name=name, - location=location, - state=state, - default_storage_type=default_storage_type, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["update_cluster"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - instance_pb2.Cluster, - metadata_type=bigtable_instance_admin_pb2.UpdateClusterMetadata, - ) - def delete_cluster( self, name, @@ -1471,109 +1574,6 @@ def list_app_profiles( ) return iterator - def update_app_profile( - self, - app_profile, - update_mask, - ignore_warnings=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates an app profile within an instance. 
- - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> # TODO: Initialize `app_profile`: - >>> app_profile = {} - >>> - >>> # TODO: Initialize `update_mask`: - >>> update_mask = {} - >>> - >>> response = client.update_app_profile(app_profile, update_mask) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - app_profile (Union[dict, ~google.cloud.bigtable_admin_v2.types.AppProfile]): Required. The app profile which will (partially) replace the current value. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.AppProfile` - update_mask (Union[dict, ~google.cloud.bigtable_admin_v2.types.FieldMask]): Required. The subset of app profile fields which should be replaced. - If unset, all fields will be replaced. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.FieldMask` - ignore_warnings (bool): If true, ignore safety checks when updating the app profile. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. 
- google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "update_app_profile" not in self._inner_api_calls: - self._inner_api_calls[ - "update_app_profile" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_app_profile, - default_retry=self._method_configs["UpdateAppProfile"].retry, - default_timeout=self._method_configs["UpdateAppProfile"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.UpdateAppProfileRequest( - app_profile=app_profile, - update_mask=update_mask, - ignore_warnings=ignore_warnings, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("app_profile.name", app_profile.name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["update_app_profile"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - instance_pb2.AppProfile, - metadata_type=bigtable_instance_admin_pb2.UpdateAppProfileMetadata, - ) - def delete_app_profile( self, name, diff --git a/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py b/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py index b2ec35e01..4301a8226 100644 --- a/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py +++ b/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py @@ -6,129 +6,111 @@ "non_idempotent": [], }, "retry_params": { - "idempotent_params": { - "initial_retry_delay_millis": 1000, - "retry_delay_multiplier": 2.0, + "default": { + "initial_retry_delay_millis": 
100, + "retry_delay_multiplier": 1.3, "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 60000, + "initial_rpc_timeout_millis": 20000, "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 60000, + "max_rpc_timeout_millis": 20000, "total_timeout_millis": 600000, - }, - "non_idempotent_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 1.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 60000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 60000, - "total_timeout_millis": 60000, - }, - "non_idempotent_heavy_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 1.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 300000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 300000, - "total_timeout_millis": 300000, - }, + } }, "methods": { "CreateInstance": { "timeout_millis": 60000, "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_heavy_params", + "retry_params_name": "default", }, - "GetInstance": { + "PartialUpdateInstance": { "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", + "retry_codes_name": "non_idempotent", + "retry_params_name": "default", }, - "ListInstances": { + "CreateCluster": { "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", + "retry_codes_name": "non_idempotent", + "retry_params_name": "default", }, - "UpdateInstance": { + "UpdateCluster": { + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "default", + }, + "UpdateAppProfile": { + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "default", + }, + "GetInstance": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", + "retry_params_name": "default", }, - "PartialUpdateInstance": { + "ListInstances": { "timeout_millis": 
60000, "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", + "retry_params_name": "default", }, - "DeleteInstance": { + "UpdateInstance": { "timeout_millis": 60000, "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", + "retry_params_name": "default", }, - "CreateCluster": { + "DeleteInstance": { "timeout_millis": 60000, "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", + "retry_params_name": "default", }, "GetCluster": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", + "retry_params_name": "default", }, "ListClusters": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "UpdateCluster": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", + "retry_params_name": "default", }, "DeleteCluster": { "timeout_millis": 60000, "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", + "retry_params_name": "default", }, "CreateAppProfile": { "timeout_millis": 60000, "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", + "retry_params_name": "default", }, "GetAppProfile": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", + "retry_params_name": "default", }, "ListAppProfiles": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "UpdateAppProfile": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", + "retry_params_name": "default", }, "DeleteAppProfile": { "timeout_millis": 60000, "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", + "retry_params_name": "default", }, "GetIamPolicy": { "timeout_millis": 60000, - "retry_codes_name": "idempotent", - 
"retry_params_name": "idempotent_params", + "retry_codes_name": "non_idempotent", + "retry_params_name": "default", }, "SetIamPolicy": { "timeout_millis": 60000, "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", + "retry_params_name": "default", }, "TestIamPermissions": { "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", + "retry_codes_name": "non_idempotent", + "retry_params_name": "default", }, }, } diff --git a/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py b/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py index 8ef4a22f7..11019b9c1 100644 --- a/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py +++ b/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py @@ -259,121 +259,6 @@ def __init__( self._inner_api_calls = {} # Service calls - def create_table( - self, - parent, - table_id, - table, - initial_splits=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates a new table in the specified instance. - The table can be created with a full set of initial column families, - specified in the request. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> # TODO: Initialize `table_id`: - >>> table_id = '' - >>> - >>> # TODO: Initialize `table`: - >>> table = {} - >>> - >>> response = client.create_table(parent, table_id, table) - - Args: - parent (str): Required. The unique name of the instance in which to create the - table. Values are of the form - ``projects/{project}/instances/{instance}``. - table_id (str): Required. The name by which the new table should be referred to - within the parent instance, e.g., ``foobar`` rather than - ``{parent}/tables/foobar``. 
Maximum 50 characters. - table (Union[dict, ~google.cloud.bigtable_admin_v2.types.Table]): Required. The Table to create. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Table` - initial_splits (list[Union[dict, ~google.cloud.bigtable_admin_v2.types.Split]]): The optional list of row keys that will be used to initially split - the table into several tablets (tablets are similar to HBase regions). - Given two split keys, ``s1`` and ``s2``, three tablets will be created, - spanning the key ranges: ``[, s1), [s1, s2), [s2, )``. - - Example: - - - Row keys := ``["a", "apple", "custom", "customer_1", "customer_2",`` - ``"other", "zz"]`` - - initial_split_keys := - ``["apple", "customer_1", "customer_2", "other"]`` - - Key assignment: - - - Tablet 1 ``[, apple) => {"a"}.`` - - Tablet 2 ``[apple, customer_1) => {"apple", "custom"}.`` - - Tablet 3 ``[customer_1, customer_2) => {"customer_1"}.`` - - Tablet 4 ``[customer_2, other) => {"customer_2"}.`` - - Tablet 5 ``[other, ) => {"other", "zz"}.`` - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Split` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Table` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. 
- ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "create_table" not in self._inner_api_calls: - self._inner_api_calls[ - "create_table" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_table, - default_retry=self._method_configs["CreateTable"].retry, - default_timeout=self._method_configs["CreateTable"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.CreateTableRequest( - parent=parent, - table_id=table_id, - table=table, - initial_splits=initial_splits, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["create_table"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - def create_table_from_snapshot( self, parent, @@ -482,50 +367,68 @@ def create_table_from_snapshot( metadata_type=bigtable_table_admin_pb2.CreateTableFromSnapshotMetadata, ) - def list_tables( + def snapshot_table( self, - parent, - view=None, - page_size=None, + name, + cluster, + snapshot_id, + ttl=None, + description=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Lists all tables served from a specified instance. + Creates a new snapshot in the specified cluster from the specified + source table. The cluster and the table must be in the same instance. + + Note: This is a private alpha release of Cloud Bigtable snapshots. This + feature is not currently available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or deprecation + policy. 
Example: >>> from google.cloud import bigtable_admin_v2 >>> >>> client = bigtable_admin_v2.BigtableTableAdminClient() >>> - >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') + >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + >>> cluster = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') >>> - >>> # Iterate over all results - >>> for element in client.list_tables(parent): - ... # process element - ... pass + >>> # TODO: Initialize `snapshot_id`: + >>> snapshot_id = '' >>> + >>> response = client.snapshot_table(name, cluster, snapshot_id) >>> - >>> # Alternatively: + >>> def callback(operation_future): + ... # Handle result. + ... result = operation_future.result() >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_tables(parent).pages: - ... for element in page: - ... # process element - ... pass + >>> response.add_done_callback(callback) + >>> + >>> # Handle metadata. + >>> metadata = response.metadata() Args: - parent (str): Required. The unique name of the instance for which tables should be - listed. Values are of the form - ``projects/{project}/instances/{instance}``. - view (~google.cloud.bigtable_admin_v2.types.View): The view to be applied to the returned tables' fields. Only - NAME_ONLY view (default) and REPLICATION_VIEW are supported. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. + name (str): Required. The unique name of the table to have the snapshot taken. + Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + cluster (str): Required. The name of the cluster where the snapshot will be created + in. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. 
+ snapshot_id (str): Required. The ID by which the new snapshot should be referred to + within the parent cluster, e.g., ``mysnapshot`` of the form: + ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` rather than + ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/mysnapshot``. + ttl (Union[dict, ~google.cloud.bigtable_admin_v2.types.Duration]): The amount of time that the new snapshot can stay active after it is + created. Once 'ttl' expires, the snapshot will get deleted. The maximum + amount of time a snapshot can stay active is 7 days. If 'ttl' is not + specified, the default value of 24 hours will be used. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_admin_v2.types.Duration` + description (str): Description of the snapshot. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -536,10 +439,7 @@ def list_tables( that is provided to the method. Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.bigtable_admin_v2.types.Table` instances. - You can also iterate over the pages of the response - using its `pages` property. + A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -549,24 +449,28 @@ def list_tables( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
- if "list_tables" not in self._inner_api_calls: + if "snapshot_table" not in self._inner_api_calls: self._inner_api_calls[ - "list_tables" + "snapshot_table" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_tables, - default_retry=self._method_configs["ListTables"].retry, - default_timeout=self._method_configs["ListTables"].timeout, + self.transport.snapshot_table, + default_retry=self._method_configs["SnapshotTable"].retry, + default_timeout=self._method_configs["SnapshotTable"].timeout, client_info=self._client_info, ) - request = bigtable_table_admin_pb2.ListTablesRequest( - parent=parent, view=view, page_size=page_size, + request = bigtable_table_admin_pb2.SnapshotTableRequest( + name=name, + cluster=cluster, + snapshot_id=snapshot_id, + ttl=ttl, + description=description, ) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("parent", parent)] + routing_header = [("name", name)] except AttributeError: pass else: @@ -575,46 +479,72 @@ def list_tables( ) metadata.append(routing_metadata) - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_tables"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="tables", - request_token_field="page_token", - response_token_field="next_page_token", + operation = self._inner_api_calls["snapshot_table"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + return google.api_core.operation.from_gapic( + operation, + self.transport._operations_client, + table_pb2.Snapshot, + metadata_type=bigtable_table_admin_pb2.SnapshotTableMetadata, ) - return iterator - def get_table( + def create_backup( self, - name, - view=None, + parent, + backup_id, + backup, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Gets metadata information about the specified table. 
+ Starts creating a new Cloud Bigtable Backup. The returned backup + ``long-running operation`` can be used to track creation of the backup. + The ``metadata`` field type is ``CreateBackupMetadata``. The + ``response`` field type is ``Backup``, if successful. Cancelling the + returned operation will stop the creation and delete the backup. Example: >>> from google.cloud import bigtable_admin_v2 >>> >>> client = bigtable_admin_v2.BigtableTableAdminClient() >>> - >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + >>> parent = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') >>> - >>> response = client.get_table(name) + >>> # TODO: Initialize `backup_id`: + >>> backup_id = '' + >>> + >>> # TODO: Initialize `backup`: + >>> backup = {} + >>> + >>> response = client.create_backup(parent, backup_id, backup) + >>> + >>> def callback(operation_future): + ... # Handle result. + ... result = operation_future.result() + >>> + >>> response.add_done_callback(callback) + >>> + >>> # Handle metadata. + >>> metadata = response.metadata() Args: - name (str): Required. The unique name of the requested table. Values are of the - form ``projects/{project}/instances/{instance}/tables/{table}``. - view (~google.cloud.bigtable_admin_v2.types.View): The view to be applied to the returned table's fields. Defaults to - ``SCHEMA_VIEW`` if unspecified. + parent (str): Required. This must be one of the clusters in the instance in which + this table is located. The backup will be stored in this cluster. Values + are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + backup_id (str): Required. The id of the backup to be created. The ``backup_id`` + along with the parent ``parent`` are combined as + {parent}/backups/{backup_id} to create the full backup name, of the + form: + ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``. 
+ This string must be between 1 and 50 characters in length and match the + regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*. + backup (Union[dict, ~google.cloud.bigtable_admin_v2.types.Backup]): Required. The backup to create. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_admin_v2.types.Backup` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -625,7 +555,7 @@ def get_table( that is provided to the method. Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Table` instance. + A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -635,22 +565,24 @@ def get_table( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if "get_table" not in self._inner_api_calls: + if "create_backup" not in self._inner_api_calls: self._inner_api_calls[ - "get_table" + "create_backup" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_table, - default_retry=self._method_configs["GetTable"].retry, - default_timeout=self._method_configs["GetTable"].timeout, + self.transport.create_backup, + default_retry=self._method_configs["CreateBackup"].retry, + default_timeout=self._method_configs["CreateBackup"].timeout, client_info=self._client_info, ) - request = bigtable_table_admin_pb2.GetTableRequest(name=name, view=view,) + request = bigtable_table_admin_pb2.CreateBackupRequest( + parent=parent, backup_id=backup_id, backup=backup, + ) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("name", name)] + routing_header = [("parent", parent)] except AttributeError: pass else: @@ -659,32 +591,64 @@ def get_table( ) metadata.append(routing_metadata) - return self._inner_api_calls["get_table"]( + operation = 
self._inner_api_calls["create_backup"]( request, retry=retry, timeout=timeout, metadata=metadata ) + return google.api_core.operation.from_gapic( + operation, + self.transport._operations_client, + table_pb2.Backup, + metadata_type=bigtable_table_admin_pb2.CreateBackupMetadata, + ) - def delete_table( + def restore_table( self, - name, + parent, + table_id, + backup=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Permanently deletes a specified table and all of its data. + Create a new table by restoring from a completed backup. The new + table must be in the same instance as the instance containing the + backup. The returned table ``long-running operation`` can be used to + track the progress of the operation, and to cancel it. The ``metadata`` + field type is ``RestoreTableMetadata``. The ``response`` type is + ``Table``, if successful. Example: >>> from google.cloud import bigtable_admin_v2 >>> >>> client = bigtable_admin_v2.BigtableTableAdminClient() >>> - >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') >>> - >>> client.delete_table(name) + >>> # TODO: Initialize `table_id`: + >>> table_id = '' + >>> + >>> response = client.restore_table(parent, table_id) + >>> + >>> def callback(operation_future): + ... # Handle result. + ... result = operation_future.result() + >>> + >>> response.add_done_callback(callback) + >>> + >>> # Handle metadata. + >>> metadata = response.metadata() Args: - name (str): Required. The unique name of the table to be deleted. Values are of - the form ``projects/{project}/instances/{instance}/tables/{table}``. + parent (str): Required. The name of the instance in which to create the restored + table. This instance must be the parent of the source backup. Values are + of the form ``projects//instances/``. + table_id (str): Required. The id of the table to create and restore to. 
This table + must not already exist. The ``table_id`` appended to ``parent`` forms + the full table name of the form + ``projects//instances//tables/``. + backup (str): Name of the backup from which to restore. Values are of the form + ``projects//instances//clusters//backups/``. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -694,6 +658,9 @@ def delete_table( metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. + Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. @@ -702,22 +669,28 @@ def delete_table( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if "delete_table" not in self._inner_api_calls: + if "restore_table" not in self._inner_api_calls: self._inner_api_calls[ - "delete_table" + "restore_table" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_table, - default_retry=self._method_configs["DeleteTable"].retry, - default_timeout=self._method_configs["DeleteTable"].timeout, + self.transport.restore_table, + default_retry=self._method_configs["RestoreTable"].retry, + default_timeout=self._method_configs["RestoreTable"].timeout, client_info=self._client_info, ) - request = bigtable_table_admin_pb2.DeleteTableRequest(name=name,) + # Sanity check: We have some fields which are mutually exclusive; + # raise ValueError if more than one is sent. 
+ google.api_core.protobuf_helpers.check_oneof(backup=backup,) + + request = bigtable_table_admin_pb2.RestoreTableRequest( + parent=parent, table_id=table_id, backup=backup, + ) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("name", name)] + routing_header = [("parent", parent)] except AttributeError: pass else: @@ -726,47 +699,78 @@ def delete_table( ) metadata.append(routing_metadata) - self._inner_api_calls["delete_table"]( + operation = self._inner_api_calls["restore_table"]( request, retry=retry, timeout=timeout, metadata=metadata ) + return google.api_core.operation.from_gapic( + operation, + self.transport._operations_client, + table_pb2.Table, + metadata_type=bigtable_table_admin_pb2.RestoreTableMetadata, + ) - def modify_column_families( + def create_table( self, - name, - modifications, + parent, + table_id, + table, + initial_splits=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Performs a series of column family modifications on the specified table. - Either all or none of the modifications will occur before this method - returns, but data requests received prior to that point may see a table - where only some modifications have taken effect. + Creates a new table in the specified instance. + The table can be created with a full set of initial column families, + specified in the request. 
Example: >>> from google.cloud import bigtable_admin_v2 >>> >>> client = bigtable_admin_v2.BigtableTableAdminClient() >>> - >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') >>> - >>> # TODO: Initialize `modifications`: - >>> modifications = [] + >>> # TODO: Initialize `table_id`: + >>> table_id = '' >>> - >>> response = client.modify_column_families(name, modifications) + >>> # TODO: Initialize `table`: + >>> table = {} + >>> + >>> response = client.create_table(parent, table_id, table) Args: - name (str): Required. The unique name of the table whose families should be - modified. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - modifications (list[Union[dict, ~google.cloud.bigtable_admin_v2.types.Modification]]): Required. Modifications to be atomically applied to the specified table's families. - Entries are applied in order, meaning that earlier modifications can be - masked by later ones (in the case of repeated updates to the same family, - for example). + parent (str): Required. The unique name of the instance in which to create the + table. Values are of the form + ``projects/{project}/instances/{instance}``. + table_id (str): Required. The name by which the new table should be referred to + within the parent instance, e.g., ``foobar`` rather than + ``{parent}/tables/foobar``. Maximum 50 characters. + table (Union[dict, ~google.cloud.bigtable_admin_v2.types.Table]): Required. The Table to create. If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Modification` + message :class:`~google.cloud.bigtable_admin_v2.types.Table` + initial_splits (list[Union[dict, ~google.cloud.bigtable_admin_v2.types.Split]]): The optional list of row keys that will be used to initially split + the table into several tablets (tablets are similar to HBase regions). 
+ Given two split keys, ``s1`` and ``s2``, three tablets will be created, + spanning the key ranges: ``[, s1), [s1, s2), [s2, )``. + + Example: + + - Row keys := ``["a", "apple", "custom", "customer_1", "customer_2",`` + ``"other", "zz"]`` + - initial_split_keys := + ``["apple", "customer_1", "customer_2", "other"]`` + - Key assignment: + + - Tablet 1 ``[, apple) => {"a"}.`` + - Tablet 2 ``[apple, customer_1) => {"apple", "custom"}.`` + - Tablet 3 ``[customer_1, customer_2) => {"customer_1"}.`` + - Tablet 4 ``[customer_2, other) => {"customer_2"}.`` + - Tablet 5 ``[other, ) => {"other", "zz"}.`` + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_admin_v2.types.Split` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -787,24 +791,27 @@ def modify_column_families( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
- if "modify_column_families" not in self._inner_api_calls: + if "create_table" not in self._inner_api_calls: self._inner_api_calls[ - "modify_column_families" + "create_table" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.modify_column_families, - default_retry=self._method_configs["ModifyColumnFamilies"].retry, - default_timeout=self._method_configs["ModifyColumnFamilies"].timeout, + self.transport.create_table, + default_retry=self._method_configs["CreateTable"].retry, + default_timeout=self._method_configs["CreateTable"].timeout, client_info=self._client_info, ) - request = bigtable_table_admin_pb2.ModifyColumnFamiliesRequest( - name=name, modifications=modifications, + request = bigtable_table_admin_pb2.CreateTableRequest( + parent=parent, + table_id=table_id, + table=table, + initial_splits=initial_splits, ) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("name", name)] + routing_header = [("parent", parent)] except AttributeError: pass else: @@ -813,40 +820,54 @@ def modify_column_families( ) metadata.append(routing_metadata) - return self._inner_api_calls["modify_column_families"]( + return self._inner_api_calls["create_table"]( request, retry=retry, timeout=timeout, metadata=metadata ) - def drop_row_range( + def list_tables( self, - name, - row_key_prefix=None, - delete_all_data_from_table=None, + parent, + view=None, + page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Permanently drop/delete a row range from a specified table. The request can - specify whether to delete all rows in a table, or only those that match a - particular prefix. + Lists all tables served from a specified instance. 
Example: >>> from google.cloud import bigtable_admin_v2 >>> >>> client = bigtable_admin_v2.BigtableTableAdminClient() >>> - >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') >>> - >>> client.drop_row_range(name) + >>> # Iterate over all results + >>> for element in client.list_tables(parent): + ... # process element + ... pass + >>> + >>> + >>> # Alternatively: + >>> + >>> # Iterate over results one page at a time + >>> for page in client.list_tables(parent).pages: + ... for element in page: + ... # process element + ... pass Args: - name (str): Required. The unique name of the table on which to drop a range of - rows. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - row_key_prefix (bytes): Delete all rows that start with this row key prefix. Prefix cannot be - zero length. - delete_all_data_from_table (bool): Delete all rows in the table. Setting this to false is a no-op. + parent (str): Required. The unique name of the instance for which tables should be + listed. Values are of the form + ``projects/{project}/instances/{instance}``. + view (~google.cloud.bigtable_admin_v2.types.View): The view to be applied to the returned tables' fields. Only + NAME_ONLY view (default) and REPLICATION_VIEW are supported. + page_size (int): The maximum number of resources contained in the + underlying API response. If page streaming is performed per- + resource, this parameter does not affect the return value. If page + streaming is performed per-page, this determines the maximum number + of resources in a page. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -856,6 +877,12 @@ def drop_row_range( metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. 
+ Returns: + A :class:`~google.api_core.page_iterator.PageIterator` instance. + An iterable of :class:`~google.cloud.bigtable_admin_v2.types.Table` instances. + You can also iterate over the pages of the response + using its `pages` property. + Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. @@ -864,33 +891,24 @@ def drop_row_range( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if "drop_row_range" not in self._inner_api_calls: + if "list_tables" not in self._inner_api_calls: self._inner_api_calls[ - "drop_row_range" + "list_tables" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.drop_row_range, - default_retry=self._method_configs["DropRowRange"].retry, - default_timeout=self._method_configs["DropRowRange"].timeout, + self.transport.list_tables, + default_retry=self._method_configs["ListTables"].retry, + default_timeout=self._method_configs["ListTables"].timeout, client_info=self._client_info, ) - # Sanity check: We have some fields which are mutually exclusive; - # raise ValueError if more than one is sent. 
- google.api_core.protobuf_helpers.check_oneof( - row_key_prefix=row_key_prefix, - delete_all_data_from_table=delete_all_data_from_table, - ) - - request = bigtable_table_admin_pb2.DropRowRangeRequest( - name=name, - row_key_prefix=row_key_prefix, - delete_all_data_from_table=delete_all_data_from_table, + request = bigtable_table_admin_pb2.ListTablesRequest( + parent=parent, view=view, page_size=page_size, ) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("name", name)] + routing_header = [("parent", parent)] except AttributeError: pass else: @@ -899,22 +917,31 @@ def drop_row_range( ) metadata.append(routing_metadata) - self._inner_api_calls["drop_row_range"]( - request, retry=retry, timeout=timeout, metadata=metadata + iterator = google.api_core.page_iterator.GRPCIterator( + client=None, + method=functools.partial( + self._inner_api_calls["list_tables"], + retry=retry, + timeout=timeout, + metadata=metadata, + ), + request=request, + items_field="tables", + request_token_field="page_token", + response_token_field="next_page_token", ) + return iterator - def generate_consistency_token( + def get_table( self, name, + view=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Generates a consistency token for a Table, which can be used in - CheckConsistency to check whether mutations to the table that finished - before this call started have been replicated. The tokens will be available - for 90 days. + Gets metadata information about the specified table. Example: >>> from google.cloud import bigtable_admin_v2 @@ -923,12 +950,13 @@ def generate_consistency_token( >>> >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') >>> - >>> response = client.generate_consistency_token(name) + >>> response = client.get_table(name) Args: - name (str): Required. The unique name of the Table for which to create a - consistency token. 
Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. + name (str): Required. The unique name of the requested table. Values are of the + form ``projects/{project}/instances/{instance}/tables/{table}``. + view (~google.cloud.bigtable_admin_v2.types.View): The view to be applied to the returned table's fields. Defaults to + ``SCHEMA_VIEW`` if unspecified. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -939,7 +967,7 @@ def generate_consistency_token( that is provided to the method. Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenResponse` instance. + A :class:`~google.cloud.bigtable_admin_v2.types.Table` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -949,19 +977,17 @@ def generate_consistency_token( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
- if "generate_consistency_token" not in self._inner_api_calls: + if "get_table" not in self._inner_api_calls: self._inner_api_calls[ - "generate_consistency_token" + "get_table" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.generate_consistency_token, - default_retry=self._method_configs["GenerateConsistencyToken"].retry, - default_timeout=self._method_configs[ - "GenerateConsistencyToken" - ].timeout, + self.transport.get_table, + default_retry=self._method_configs["GetTable"].retry, + default_timeout=self._method_configs["GetTable"].timeout, client_info=self._client_info, ) - request = bigtable_table_admin_pb2.GenerateConsistencyTokenRequest(name=name,) + request = bigtable_table_admin_pb2.GetTableRequest(name=name, view=view,) if metadata is None: metadata = [] metadata = list(metadata) @@ -975,22 +1001,19 @@ def generate_consistency_token( ) metadata.append(routing_metadata) - return self._inner_api_calls["generate_consistency_token"]( + return self._inner_api_calls["get_table"]( request, retry=retry, timeout=timeout, metadata=metadata ) - def check_consistency( + def delete_table( self, name, - consistency_token, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Checks replication consistency based on a consistency token, that is, if - replication has caught up based on the conditions specified in the token - and the check request. + Permanently deletes a specified table and all of its data. Example: >>> from google.cloud import bigtable_admin_v2 @@ -999,16 +1022,11 @@ def check_consistency( >>> >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') >>> - >>> # TODO: Initialize `consistency_token`: - >>> consistency_token = '' - >>> - >>> response = client.check_consistency(name, consistency_token) + >>> client.delete_table(name) Args: - name (str): Required. The unique name of the Table for which to check - replication consistency. 
Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - consistency_token (str): Required. The token created using GenerateConsistencyToken for the Table. + name (str): Required. The unique name of the table to be deleted. Values are of + the form ``projects/{project}/instances/{instance}/tables/{table}``. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -1018,9 +1036,6 @@ def check_consistency( metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.CheckConsistencyResponse` instance. - Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. @@ -1029,19 +1044,17 @@ def check_consistency( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if "check_consistency" not in self._inner_api_calls: + if "delete_table" not in self._inner_api_calls: self._inner_api_calls[ - "check_consistency" + "delete_table" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.check_consistency, - default_retry=self._method_configs["CheckConsistency"].retry, - default_timeout=self._method_configs["CheckConsistency"].timeout, + self.transport.delete_table, + default_retry=self._method_configs["DeleteTable"].retry, + default_timeout=self._method_configs["DeleteTable"].timeout, client_info=self._client_info, ) - request = bigtable_table_admin_pb2.CheckConsistencyRequest( - name=name, consistency_token=consistency_token, - ) + request = bigtable_table_admin_pb2.DeleteTableRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) @@ -1055,41 +1068,47 @@ def check_consistency( ) metadata.append(routing_metadata) - return self._inner_api_calls["check_consistency"]( + self._inner_api_calls["delete_table"]( request, 
retry=retry, timeout=timeout, metadata=metadata ) - def get_iam_policy( + def modify_column_families( self, - resource, - options_=None, + name, + modifications, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Gets the access control policy for a Table or Backup resource. - Returns an empty policy if the resource exists but does not have a policy - set. + Performs a series of column family modifications on the specified table. + Either all or none of the modifications will occur before this method + returns, but data requests received prior to that point may see a table + where only some modifications have taken effect. Example: >>> from google.cloud import bigtable_admin_v2 >>> >>> client = bigtable_admin_v2.BigtableTableAdminClient() >>> - >>> # TODO: Initialize `resource`: - >>> resource = '' + >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') >>> - >>> response = client.get_iam_policy(resource) + >>> # TODO: Initialize `modifications`: + >>> modifications = [] + >>> + >>> response = client.modify_column_families(name, modifications) Args: - resource (str): REQUIRED: The resource for which the policy is being requested. - See the operation documentation for the appropriate value for this field. - options_ (Union[dict, ~google.cloud.bigtable_admin_v2.types.GetPolicyOptions]): OPTIONAL: A ``GetPolicyOptions`` object for specifying options to - ``GetIamPolicy``. This field is only used by Cloud IAM. + name (str): Required. The unique name of the table whose families should be + modified. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + modifications (list[Union[dict, ~google.cloud.bigtable_admin_v2.types.Modification]]): Required. Modifications to be atomically applied to the specified table's families. 
+ Entries are applied in order, meaning that earlier modifications can be + masked by later ones (in the case of repeated updates to the same family, + for example). If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.GetPolicyOptions` + message :class:`~google.cloud.bigtable_admin_v2.types.Modification` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -1100,7 +1119,7 @@ def get_iam_policy( that is provided to the method. Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Policy` instance. + A :class:`~google.cloud.bigtable_admin_v2.types.Table` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -1110,24 +1129,24 @@ def get_iam_policy( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if "get_iam_policy" not in self._inner_api_calls: + if "modify_column_families" not in self._inner_api_calls: self._inner_api_calls[ - "get_iam_policy" + "modify_column_families" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_iam_policy, - default_retry=self._method_configs["GetIamPolicy"].retry, - default_timeout=self._method_configs["GetIamPolicy"].timeout, + self.transport.modify_column_families, + default_retry=self._method_configs["ModifyColumnFamilies"].retry, + default_timeout=self._method_configs["ModifyColumnFamilies"].timeout, client_info=self._client_info, ) - request = iam_policy_pb2.GetIamPolicyRequest( - resource=resource, options=options_, + request = bigtable_table_admin_pb2.ModifyColumnFamiliesRequest( + name=name, modifications=modifications, ) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("resource", resource)] + routing_header = [("name", name)] except AttributeError: pass else: @@ -1136,45 +1155,40 @@ def 
get_iam_policy( ) metadata.append(routing_metadata) - return self._inner_api_calls["get_iam_policy"]( + return self._inner_api_calls["modify_column_families"]( request, retry=retry, timeout=timeout, metadata=metadata ) - def set_iam_policy( + def drop_row_range( self, - resource, - policy, + name, + row_key_prefix=None, + delete_all_data_from_table=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Sets the access control policy on a Table or Backup resource. - Replaces any existing policy. + Permanently drop/delete a row range from a specified table. The request can + specify whether to delete all rows in a table, or only those that match a + particular prefix. Example: >>> from google.cloud import bigtable_admin_v2 >>> >>> client = bigtable_admin_v2.BigtableTableAdminClient() >>> - >>> # TODO: Initialize `resource`: - >>> resource = '' - >>> - >>> # TODO: Initialize `policy`: - >>> policy = {} + >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') >>> - >>> response = client.set_iam_policy(resource, policy) + >>> client.drop_row_range(name) Args: - resource (str): REQUIRED: The resource for which the policy is being specified. - See the operation documentation for the appropriate value for this field. - policy (Union[dict, ~google.cloud.bigtable_admin_v2.types.Policy]): REQUIRED: The complete policy to be applied to the ``resource``. The - size of the policy is limited to a few 10s of KB. An empty policy is a - valid policy but certain Cloud Platform services (such as Projects) - might reject them. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Policy` + name (str): Required. The unique name of the table on which to drop a range of + rows. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. 
+ row_key_prefix (bytes): Delete all rows that start with this row key prefix. Prefix cannot be + zero length. + delete_all_data_from_table (bool): Delete all rows in the table. Setting this to false is a no-op. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -1184,9 +1198,6 @@ def set_iam_policy( metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Policy` instance. - Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. @@ -1195,22 +1206,33 @@ def set_iam_policy( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if "set_iam_policy" not in self._inner_api_calls: + if "drop_row_range" not in self._inner_api_calls: self._inner_api_calls[ - "set_iam_policy" + "drop_row_range" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.set_iam_policy, - default_retry=self._method_configs["SetIamPolicy"].retry, - default_timeout=self._method_configs["SetIamPolicy"].timeout, + self.transport.drop_row_range, + default_retry=self._method_configs["DropRowRange"].retry, + default_timeout=self._method_configs["DropRowRange"].timeout, client_info=self._client_info, ) - request = iam_policy_pb2.SetIamPolicyRequest(resource=resource, policy=policy,) + # Sanity check: We have some fields which are mutually exclusive; + # raise ValueError if more than one is sent. 
+ google.api_core.protobuf_helpers.check_oneof( + row_key_prefix=row_key_prefix, + delete_all_data_from_table=delete_all_data_from_table, + ) + + request = bigtable_table_admin_pb2.DropRowRangeRequest( + name=name, + row_key_prefix=row_key_prefix, + delete_all_data_from_table=delete_all_data_from_table, + ) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("resource", resource)] + routing_header = [("name", name)] except AttributeError: pass else: @@ -1219,41 +1241,36 @@ def set_iam_policy( ) metadata.append(routing_metadata) - return self._inner_api_calls["set_iam_policy"]( + self._inner_api_calls["drop_row_range"]( request, retry=retry, timeout=timeout, metadata=metadata ) - def test_iam_permissions( + def generate_consistency_token( self, - resource, - permissions, + name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Returns permissions that the caller has on the specified Table or Backup resource. + Generates a consistency token for a Table, which can be used in + CheckConsistency to check whether mutations to the table that finished + before this call started have been replicated. The tokens will be available + for 90 days. Example: >>> from google.cloud import bigtable_admin_v2 >>> >>> client = bigtable_admin_v2.BigtableTableAdminClient() >>> - >>> # TODO: Initialize `resource`: - >>> resource = '' - >>> - >>> # TODO: Initialize `permissions`: - >>> permissions = [] + >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') >>> - >>> response = client.test_iam_permissions(resource, permissions) + >>> response = client.generate_consistency_token(name) Args: - resource (str): REQUIRED: The resource for which the policy detail is being requested. - See the operation documentation for the appropriate value for this field. - permissions (list[str]): The set of permissions to check for the ``resource``. 
Permissions - with wildcards (such as '*' or 'storage.*') are not allowed. For more - information see `IAM - Overview `__. + name (str): Required. The unique name of the Table for which to create a + consistency token. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -1264,7 +1281,7 @@ def test_iam_permissions( that is provided to the method. Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.TestIamPermissionsResponse` instance. + A :class:`~google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenResponse` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -1274,24 +1291,24 @@ def test_iam_permissions( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if "test_iam_permissions" not in self._inner_api_calls: + if "generate_consistency_token" not in self._inner_api_calls: self._inner_api_calls[ - "test_iam_permissions" + "generate_consistency_token" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.test_iam_permissions, - default_retry=self._method_configs["TestIamPermissions"].retry, - default_timeout=self._method_configs["TestIamPermissions"].timeout, + self.transport.generate_consistency_token, + default_retry=self._method_configs["GenerateConsistencyToken"].retry, + default_timeout=self._method_configs[ + "GenerateConsistencyToken" + ].timeout, client_info=self._client_info, ) - request = iam_policy_pb2.TestIamPermissionsRequest( - resource=resource, permissions=permissions, - ) + request = bigtable_table_admin_pb2.GenerateConsistencyTokenRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("resource", resource)] + routing_header = [("name", name)] except AttributeError: pass else: @@ 
-1300,30 +1317,22 @@ def test_iam_permissions( ) metadata.append(routing_metadata) - return self._inner_api_calls["test_iam_permissions"]( + return self._inner_api_calls["generate_consistency_token"]( request, retry=retry, timeout=timeout, metadata=metadata ) - def snapshot_table( + def check_consistency( self, name, - cluster, - snapshot_id, - ttl=None, - description=None, + consistency_token, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Creates a new snapshot in the specified cluster from the specified - source table. The cluster and the table must be in the same instance. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. + Checks replication consistency based on a consistency token, that is, if + replication has caught up based on the conditions specified in the token + and the check request. Example: >>> from google.cloud import bigtable_admin_v2 @@ -1331,41 +1340,17 @@ def snapshot_table( >>> client = bigtable_admin_v2.BigtableTableAdminClient() >>> >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> cluster = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') - >>> - >>> # TODO: Initialize `snapshot_id`: - >>> snapshot_id = '' - >>> - >>> response = client.snapshot_table(name, cluster, snapshot_id) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() >>> - >>> response.add_done_callback(callback) + >>> # TODO: Initialize `consistency_token`: + >>> consistency_token = '' >>> - >>> # Handle metadata. - >>> metadata = response.metadata() + >>> response = client.check_consistency(name, consistency_token) Args: - name (str): Required. 
The unique name of the table to have the snapshot taken. - Values are of the form + name (str): Required. The unique name of the Table for which to check + replication consistency. Values are of the form ``projects/{project}/instances/{instance}/tables/{table}``. - cluster (str): Required. The name of the cluster where the snapshot will be created - in. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - snapshot_id (str): Required. The ID by which the new snapshot should be referred to - within the parent cluster, e.g., ``mysnapshot`` of the form: - ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` rather than - ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/mysnapshot``. - ttl (Union[dict, ~google.cloud.bigtable_admin_v2.types.Duration]): The amount of time that the new snapshot can stay active after it is - created. Once 'ttl' expires, the snapshot will get deleted. The maximum - amount of time a snapshot can stay active is 7 days. If 'ttl' is not - specified, the default value of 24 hours will be used. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Duration` - description (str): Description of the snapshot. + consistency_token (str): Required. The token created using GenerateConsistencyToken for the Table. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -1376,7 +1361,7 @@ def snapshot_table( that is provided to the method. Returns: - A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. + A :class:`~google.cloud.bigtable_admin_v2.types.CheckConsistencyResponse` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -1386,22 +1371,18 @@ def snapshot_table( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
- if "snapshot_table" not in self._inner_api_calls: + if "check_consistency" not in self._inner_api_calls: self._inner_api_calls[ - "snapshot_table" + "check_consistency" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.snapshot_table, - default_retry=self._method_configs["SnapshotTable"].retry, - default_timeout=self._method_configs["SnapshotTable"].timeout, + self.transport.check_consistency, + default_retry=self._method_configs["CheckConsistency"].retry, + default_timeout=self._method_configs["CheckConsistency"].timeout, client_info=self._client_info, ) - request = bigtable_table_admin_pb2.SnapshotTableRequest( - name=name, - cluster=cluster, - snapshot_id=snapshot_id, - ttl=ttl, - description=description, + request = bigtable_table_admin_pb2.CheckConsistencyRequest( + name=name, consistency_token=consistency_token, ) if metadata is None: metadata = [] @@ -1416,15 +1397,9 @@ def snapshot_table( ) metadata.append(routing_metadata) - operation = self._inner_api_calls["snapshot_table"]( + return self._inner_api_calls["check_consistency"]( request, retry=retry, timeout=timeout, metadata=metadata ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - table_pb2.Snapshot, - metadata_type=bigtable_table_admin_pb2.SnapshotTableMetadata, - ) def get_snapshot( self, @@ -1690,62 +1665,117 @@ def delete_snapshot( request, retry=retry, timeout=timeout, metadata=metadata ) - def create_backup( + def get_backup( self, - parent, - backup_id, - backup, + name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Starts creating a new Cloud Bigtable Backup. The returned backup - ``long-running operation`` can be used to track creation of the backup. - The ``metadata`` field type is ``CreateBackupMetadata``. The - ``response`` field type is ``Backup``, if successful. Cancelling the - returned operation will stop the creation and delete the backup. 
+ Gets metadata on a pending or completed Cloud Bigtable Backup. Example: >>> from google.cloud import bigtable_admin_v2 >>> >>> client = bigtable_admin_v2.BigtableTableAdminClient() >>> - >>> parent = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') + >>> name = client.backup_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]', '[BACKUP]') >>> - >>> # TODO: Initialize `backup_id`: - >>> backup_id = '' + >>> response = client.get_backup(name) + + Args: + name (str): Required. Name of the backup. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types.Backup` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. 
+ if "get_backup" not in self._inner_api_calls: + self._inner_api_calls[ + "get_backup" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.get_backup, + default_retry=self._method_configs["GetBackup"].retry, + default_timeout=self._method_configs["GetBackup"].timeout, + client_info=self._client_info, + ) + + request = bigtable_table_admin_pb2.GetBackupRequest(name=name,) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("name", name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + return self._inner_api_calls["get_backup"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def update_backup( + self, + backup, + update_mask, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Updates a pending or completed Cloud Bigtable Backup. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableTableAdminClient() >>> >>> # TODO: Initialize `backup`: >>> backup = {} >>> - >>> response = client.create_backup(parent, backup_id, backup) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) + >>> # TODO: Initialize `update_mask`: + >>> update_mask = {} >>> - >>> # Handle metadata. - >>> metadata = response.metadata() + >>> response = client.update_backup(backup, update_mask) + + Args: + backup (Union[dict, ~google.cloud.bigtable_admin_v2.types.Backup]): Required. The backup to update. ``backup.name``, and the fields to + be updated as specified by ``update_mask`` are required. Other fields + are ignored. Update is only supported for the following fields: - Args: - parent (str): Required. 
This must be one of the clusters in the instance in which - this table is located. The backup will be stored in this cluster. Values - are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - backup_id (str): Required. The id of the backup to be created. The ``backup_id`` - along with the parent ``parent`` are combined as - {parent}/backups/{backup_id} to create the full backup name, of the - form: - ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``. - This string must be between 1 and 50 characters in length and match the - regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*. - backup (Union[dict, ~google.cloud.bigtable_admin_v2.types.Backup]): Required. The backup to create. + - ``backup.expire_time``. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_admin_v2.types.Backup` + update_mask (Union[dict, ~google.cloud.bigtable_admin_v2.types.FieldMask]): Required. A mask specifying which fields (e.g. ``expire_time``) in + the Backup resource should be updated. This mask is relative to the + Backup resource, not to the request message. The field mask must always + be specified; this prevents any future fields from being erased + accidentally by clients that do not know about them. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_admin_v2.types.FieldMask` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -1756,7 +1786,7 @@ def create_backup( that is provided to the method. Returns: - A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. + A :class:`~google.cloud.bigtable_admin_v2.types.Backup` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -1766,24 +1796,24 @@ def create_backup( ValueError: If the parameters are invalid. 
""" # Wrap the transport method to add retry and timeout logic. - if "create_backup" not in self._inner_api_calls: + if "update_backup" not in self._inner_api_calls: self._inner_api_calls[ - "create_backup" + "update_backup" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_backup, - default_retry=self._method_configs["CreateBackup"].retry, - default_timeout=self._method_configs["CreateBackup"].timeout, + self.transport.update_backup, + default_retry=self._method_configs["UpdateBackup"].retry, + default_timeout=self._method_configs["UpdateBackup"].timeout, client_info=self._client_info, ) - request = bigtable_table_admin_pb2.CreateBackupRequest( - parent=parent, backup_id=backup_id, backup=backup, + request = bigtable_table_admin_pb2.UpdateBackupRequest( + backup=backup, update_mask=update_mask, ) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("parent", parent)] + routing_header = [("backup.name", backup.name)] except AttributeError: pass else: @@ -1792,17 +1822,11 @@ def create_backup( ) metadata.append(routing_metadata) - operation = self._inner_api_calls["create_backup"]( + return self._inner_api_calls["update_backup"]( request, retry=retry, timeout=timeout, metadata=metadata ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - table_pb2.Backup, - metadata_type=bigtable_table_admin_pb2.CreateBackupMetadata, - ) - def get_backup( + def delete_backup( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, @@ -1810,7 +1834,7 @@ def get_backup( metadata=None, ): """ - Gets metadata on a pending or completed Cloud Bigtable Backup. + Deletes a pending or completed Cloud Bigtable backup. 
Example: >>> from google.cloud import bigtable_admin_v2 @@ -1819,10 +1843,10 @@ def get_backup( >>> >>> name = client.backup_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]', '[BACKUP]') >>> - >>> response = client.get_backup(name) + >>> client.delete_backup(name) Args: - name (str): Required. Name of the backup. Values are of the form + name (str): Required. Name of the backup to delete. Values are of the form ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will @@ -1833,9 +1857,6 @@ def get_backup( metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Backup` instance. - Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. @@ -1844,17 +1865,17 @@ def get_backup( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
- if "get_backup" not in self._inner_api_calls: + if "delete_backup" not in self._inner_api_calls: self._inner_api_calls[ - "get_backup" + "delete_backup" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_backup, - default_retry=self._method_configs["GetBackup"].retry, - default_timeout=self._method_configs["GetBackup"].timeout, + self.transport.delete_backup, + default_retry=self._method_configs["DeleteBackup"].retry, + default_timeout=self._method_configs["DeleteBackup"].timeout, client_info=self._client_info, ) - request = bigtable_table_admin_pb2.GetBackupRequest(name=name,) + request = bigtable_table_admin_pb2.DeleteBackupRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) @@ -1868,7 +1889,7 @@ def get_backup( ) metadata.append(routing_metadata) - return self._inner_api_calls["get_backup"]( + self._inner_api_calls["delete_backup"]( request, retry=retry, timeout=timeout, metadata=metadata ) @@ -2030,47 +2051,37 @@ def list_backups( ) return iterator - def update_backup( + def get_iam_policy( self, - backup, - update_mask, + resource, + options_=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Updates a pending or completed Cloud Bigtable Backup. + Gets the access control policy for a Table or Backup resource. + Returns an empty policy if the resource exists but does not have a policy + set. Example: >>> from google.cloud import bigtable_admin_v2 >>> >>> client = bigtable_admin_v2.BigtableTableAdminClient() >>> - >>> # TODO: Initialize `backup`: - >>> backup = {} - >>> - >>> # TODO: Initialize `update_mask`: - >>> update_mask = {} + >>> # TODO: Initialize `resource`: + >>> resource = '' >>> - >>> response = client.update_backup(backup, update_mask) + >>> response = client.get_iam_policy(resource) Args: - backup (Union[dict, ~google.cloud.bigtable_admin_v2.types.Backup]): Required. The backup to update. 
``backup.name``, and the fields to - be updated as specified by ``update_mask`` are required. Other fields - are ignored. Update is only supported for the following fields: - - - ``backup.expire_time``. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Backup` - update_mask (Union[dict, ~google.cloud.bigtable_admin_v2.types.FieldMask]): Required. A mask specifying which fields (e.g. ``expire_time``) in - the Backup resource should be updated. This mask is relative to the - Backup resource, not to the request message. The field mask must always - be specified; this prevents any future fields from being erased - accidentally by clients that do not know about them. + resource (str): REQUIRED: The resource for which the policy is being requested. + See the operation documentation for the appropriate value for this field. + options_ (Union[dict, ~google.cloud.bigtable_admin_v2.types.GetPolicyOptions]): OPTIONAL: A ``GetPolicyOptions`` object for specifying options to + ``GetIamPolicy``. This field is only used by Cloud IAM. If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.FieldMask` + message :class:`~google.cloud.bigtable_admin_v2.types.GetPolicyOptions` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -2081,7 +2092,7 @@ def update_backup( that is provided to the method. Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Backup` instance. + A :class:`~google.cloud.bigtable_admin_v2.types.Policy` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -2091,24 +2102,24 @@ def update_backup( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
- if "update_backup" not in self._inner_api_calls: + if "get_iam_policy" not in self._inner_api_calls: self._inner_api_calls[ - "update_backup" + "get_iam_policy" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_backup, - default_retry=self._method_configs["UpdateBackup"].retry, - default_timeout=self._method_configs["UpdateBackup"].timeout, + self.transport.get_iam_policy, + default_retry=self._method_configs["GetIamPolicy"].retry, + default_timeout=self._method_configs["GetIamPolicy"].timeout, client_info=self._client_info, ) - request = bigtable_table_admin_pb2.UpdateBackupRequest( - backup=backup, update_mask=update_mask, + request = iam_policy_pb2.GetIamPolicyRequest( + resource=resource, options=options_, ) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("backup.name", backup.name)] + routing_header = [("resource", resource)] except AttributeError: pass else: @@ -2117,32 +2128,45 @@ def update_backup( ) metadata.append(routing_metadata) - return self._inner_api_calls["update_backup"]( + return self._inner_api_calls["get_iam_policy"]( request, retry=retry, timeout=timeout, metadata=metadata ) - def delete_backup( + def set_iam_policy( self, - name, + resource, + policy, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Deletes a pending or completed Cloud Bigtable backup. + Sets the access control policy on a Table or Backup resource. + Replaces any existing policy. Example: >>> from google.cloud import bigtable_admin_v2 >>> >>> client = bigtable_admin_v2.BigtableTableAdminClient() >>> - >>> name = client.backup_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]', '[BACKUP]') + >>> # TODO: Initialize `resource`: + >>> resource = '' >>> - >>> client.delete_backup(name) + >>> # TODO: Initialize `policy`: + >>> policy = {} + >>> + >>> response = client.set_iam_policy(resource, policy) Args: - name (str): Required. 
Name of the backup to delete. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. + resource (str): REQUIRED: The resource for which the policy is being specified. + See the operation documentation for the appropriate value for this field. + policy (Union[dict, ~google.cloud.bigtable_admin_v2.types.Policy]): REQUIRED: The complete policy to be applied to the ``resource``. The + size of the policy is limited to a few 10s of KB. An empty policy is a + valid policy but certain Cloud Platform services (such as Projects) + might reject them. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_admin_v2.types.Policy` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -2152,6 +2176,9 @@ def delete_backup( metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types.Policy` instance. + Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. @@ -2160,22 +2187,22 @@ def delete_backup( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
- if "delete_backup" not in self._inner_api_calls: + if "set_iam_policy" not in self._inner_api_calls: self._inner_api_calls[ - "delete_backup" + "set_iam_policy" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_backup, - default_retry=self._method_configs["DeleteBackup"].retry, - default_timeout=self._method_configs["DeleteBackup"].timeout, + self.transport.set_iam_policy, + default_retry=self._method_configs["SetIamPolicy"].retry, + default_timeout=self._method_configs["SetIamPolicy"].timeout, client_info=self._client_info, ) - request = bigtable_table_admin_pb2.DeleteBackupRequest(name=name,) + request = iam_policy_pb2.SetIamPolicyRequest(resource=resource, policy=policy,) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("name", name)] + routing_header = [("resource", resource)] except AttributeError: pass else: @@ -2184,58 +2211,41 @@ def delete_backup( ) metadata.append(routing_metadata) - self._inner_api_calls["delete_backup"]( + return self._inner_api_calls["set_iam_policy"]( request, retry=retry, timeout=timeout, metadata=metadata ) - def restore_table( + def test_iam_permissions( self, - parent, - table_id, - backup=None, + resource, + permissions, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Create a new table by restoring from a completed backup. The new - table must be in the same instance as the instance containing the - backup. The returned table ``long-running operation`` can be used to - track the progress of the operation, and to cancel it. The ``metadata`` - field type is ``RestoreTableMetadata``. The ``response`` type is - ``Table``, if successful. + Returns permissions that the caller has on the specified Table or Backup resource. 
Example: >>> from google.cloud import bigtable_admin_v2 >>> >>> client = bigtable_admin_v2.BigtableTableAdminClient() >>> - >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> # TODO: Initialize `table_id`: - >>> table_id = '' - >>> - >>> response = client.restore_table(parent, table_id) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() + >>> # TODO: Initialize `resource`: + >>> resource = '' >>> - >>> response.add_done_callback(callback) + >>> # TODO: Initialize `permissions`: + >>> permissions = [] >>> - >>> # Handle metadata. - >>> metadata = response.metadata() + >>> response = client.test_iam_permissions(resource, permissions) Args: - parent (str): Required. The name of the instance in which to create the restored - table. This instance must be the parent of the source backup. Values are - of the form ``projects//instances/``. - table_id (str): Required. The id of the table to create and restore to. This table - must not already exist. The ``table_id`` appended to ``parent`` forms - the full table name of the form - ``projects//instances//tables/``. - backup (str): Name of the backup from which to restore. Values are of the form - ``projects//instances//clusters//backups/``. + resource (str): REQUIRED: The resource for which the policy detail is being requested. + See the operation documentation for the appropriate value for this field. + permissions (list[str]): The set of permissions to check for the ``resource``. Permissions + with wildcards (such as '*' or 'storage.*') are not allowed. For more + information see `IAM + Overview `__. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -2246,7 +2256,7 @@ def restore_table( that is provided to the method. Returns: - A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. 
+ A :class:`~google.cloud.bigtable_admin_v2.types.TestIamPermissionsResponse` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -2256,28 +2266,24 @@ def restore_table( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if "restore_table" not in self._inner_api_calls: + if "test_iam_permissions" not in self._inner_api_calls: self._inner_api_calls[ - "restore_table" + "test_iam_permissions" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.restore_table, - default_retry=self._method_configs["RestoreTable"].retry, - default_timeout=self._method_configs["RestoreTable"].timeout, + self.transport.test_iam_permissions, + default_retry=self._method_configs["TestIamPermissions"].retry, + default_timeout=self._method_configs["TestIamPermissions"].timeout, client_info=self._client_info, ) - # Sanity check: We have some fields which are mutually exclusive; - # raise ValueError if more than one is sent. 
- google.api_core.protobuf_helpers.check_oneof(backup=backup,) - - request = bigtable_table_admin_pb2.RestoreTableRequest( - parent=parent, table_id=table_id, backup=backup, + request = iam_policy_pb2.TestIamPermissionsRequest( + resource=resource, permissions=permissions, ) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("parent", parent)] + routing_header = [("resource", resource)] except AttributeError: pass else: @@ -2286,12 +2292,6 @@ def restore_table( ) metadata.append(routing_metadata) - operation = self._inner_api_calls["restore_table"]( + return self._inner_api_calls["test_iam_permissions"]( request, retry=retry, timeout=timeout, metadata=metadata ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - table_pb2.Table, - metadata_type=bigtable_table_admin_pb2.RestoreTableMetadata, - ) diff --git a/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py b/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py index db60047bd..85cc354a4 100644 --- a/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py +++ b/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py @@ -6,153 +6,126 @@ "non_idempotent": [], }, "retry_params": { - "idempotent_params": { - "initial_retry_delay_millis": 1000, - "retry_delay_multiplier": 2.0, + "default": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 60000, + "initial_rpc_timeout_millis": 20000, "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 60000, + "max_rpc_timeout_millis": 20000, "total_timeout_millis": 600000, - }, - "non_idempotent_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 1.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 60000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 60000, - 
"total_timeout_millis": 60000, - }, - "non_idempotent_heavy_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 1.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 300000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 300000, - "total_timeout_millis": 300000, - }, - "drop_row_range_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 1.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 3600000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 3600000, - "total_timeout_millis": 3600000, - }, + } }, "methods": { - "CreateTable": { - "timeout_millis": 130000, + "CreateTableFromSnapshot": { + "timeout_millis": 60000, "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_heavy_params", + "retry_params_name": "default", }, - "CreateTableFromSnapshot": { + "SnapshotTable": { + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "default", + }, + "CreateBackup": { "timeout_millis": 60000, "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", + "retry_params_name": "default", + }, + "RestoreTable": { + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "default", + }, + "CreateTable": { + "timeout_millis": 130000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "default", }, "ListTables": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", + "retry_params_name": "default", }, "GetTable": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", + "retry_params_name": "default", }, "DeleteTable": { "timeout_millis": 60000, "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", + "retry_params_name": "default", }, "ModifyColumnFamilies": { "timeout_millis": 60000, "retry_codes_name": "non_idempotent", - 
"retry_params_name": "non_idempotent_heavy_params", + "retry_params_name": "default", }, "DropRowRange": { "timeout_millis": 900000, "retry_codes_name": "non_idempotent", - "retry_params_name": "drop_row_range_params", + "retry_params_name": "default", }, "GenerateConsistencyToken": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "CheckConsistency": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "GetIamPolicy": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "SetIamPolicy": { "timeout_millis": 60000, "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - "TestIamPermissions": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", + "retry_params_name": "default", }, - "SnapshotTable": { + "CheckConsistency": { "timeout_millis": 60000, "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", + "retry_params_name": "default", }, "GetSnapshot": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", + "retry_params_name": "default", }, "ListSnapshots": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", + "retry_params_name": "default", }, "DeleteSnapshot": { "timeout_millis": 60000, "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", + "retry_params_name": "default", }, - "CreateBackup": { + "GetBackup": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "default", + }, + "UpdateBackup": { "timeout_millis": 60000, "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", + "retry_params_name": "default", }, - "GetBackup": { + "DeleteBackup": { 
"timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", + "retry_codes_name": "non_idempotent", + "retry_params_name": "default", }, "ListBackups": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", + "retry_params_name": "default", }, - "UpdateBackup": { + "GetIamPolicy": { "timeout_millis": 60000, "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", + "retry_params_name": "default", }, - "DeleteBackup": { + "SetIamPolicy": { "timeout_millis": 60000, "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", + "retry_params_name": "default", }, - "RestoreTable": { + "TestIamPermissions": { "timeout_millis": 60000, "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", + "retry_params_name": "default", }, }, } diff --git a/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py b/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py index 536629604..7ca758edc 100644 --- a/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py +++ b/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py @@ -141,124 +141,137 @@ def create_instance(self): return self._stubs["bigtable_instance_admin_stub"].CreateInstance @property - def get_instance(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.get_instance`. + def partial_update_instance(self): + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.partial_update_instance`. - Gets information about an instance. + Partially updates an instance within a project. This method can modify all + fields of an Instance and is the preferred way to update an Instance. 
Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["bigtable_instance_admin_stub"].GetInstance + return self._stubs["bigtable_instance_admin_stub"].PartialUpdateInstance @property - def list_instances(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.list_instances`. + def create_cluster(self): + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.create_cluster`. - Lists information about instances in a project. + Creates a cluster within an instance. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["bigtable_instance_admin_stub"].ListInstances + return self._stubs["bigtable_instance_admin_stub"].CreateCluster @property - def update_instance(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.update_instance`. + def update_cluster(self): + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.update_cluster`. - Updates an instance within a project. This method updates only the display - name and type for an Instance. To update other Instance properties, such as - labels, use PartialUpdateInstance. + Updates a cluster within an instance. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["bigtable_instance_admin_stub"].UpdateInstance + return self._stubs["bigtable_instance_admin_stub"].UpdateCluster @property - def partial_update_instance(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.partial_update_instance`. + def update_app_profile(self): + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.update_app_profile`. - Partially updates an instance within a project. 
This method can modify all - fields of an Instance and is the preferred way to update an Instance. + Updates an app profile within an instance. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["bigtable_instance_admin_stub"].PartialUpdateInstance + return self._stubs["bigtable_instance_admin_stub"].UpdateAppProfile @property - def delete_instance(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.delete_instance`. + def get_instance(self): + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.get_instance`. - Delete an instance from a project. + Gets information about an instance. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["bigtable_instance_admin_stub"].DeleteInstance + return self._stubs["bigtable_instance_admin_stub"].GetInstance @property - def create_cluster(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.create_cluster`. + def list_instances(self): + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.list_instances`. - Creates a cluster within an instance. + Lists information about instances in a project. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["bigtable_instance_admin_stub"].CreateCluster + return self._stubs["bigtable_instance_admin_stub"].ListInstances @property - def get_cluster(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.get_cluster`. + def update_instance(self): + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.update_instance`. - Gets information about a cluster. + Updates an instance within a project. This method updates only the display + name and type for an Instance. 
To update other Instance properties, such as + labels, use PartialUpdateInstance. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["bigtable_instance_admin_stub"].GetCluster + return self._stubs["bigtable_instance_admin_stub"].UpdateInstance @property - def list_clusters(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.list_clusters`. + def delete_instance(self): + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.delete_instance`. - Lists information about clusters in an instance. + Delete an instance from a project. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["bigtable_instance_admin_stub"].ListClusters + return self._stubs["bigtable_instance_admin_stub"].DeleteInstance @property - def update_cluster(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.update_cluster`. + def get_cluster(self): + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.get_cluster`. - Updates a cluster within an instance. + Gets information about a cluster. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["bigtable_instance_admin_stub"].UpdateCluster + return self._stubs["bigtable_instance_admin_stub"].GetCluster + + @property + def list_clusters(self): + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.list_clusters`. + + Lists information about clusters in an instance. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. 
+ """ + return self._stubs["bigtable_instance_admin_stub"].ListClusters @property def delete_cluster(self): @@ -312,19 +325,6 @@ def list_app_profiles(self): """ return self._stubs["bigtable_instance_admin_stub"].ListAppProfiles - @property - def update_app_profile(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.update_app_profile`. - - Updates an app profile within an instance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].UpdateAppProfile - @property def delete_app_profile(self): """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.delete_app_profile`. diff --git a/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py b/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py index c732bca97..68bee69ae 100644 --- a/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py +++ b/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py @@ -128,26 +128,31 @@ def channel(self): return self._channel @property - def create_table(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.create_table`. + def create_table_from_snapshot(self): + """Return the gRPC stub for :meth:`BigtableTableAdminClient.create_table_from_snapshot`. - Creates a new table in the specified instance. - The table can be created with a full set of initial column families, - specified in the request. + Creates a new table from the specified snapshot. The target table must + not exist. The snapshot and the table must be in the same instance. + + Note: This is a private alpha release of Cloud Bigtable snapshots. This + feature is not currently available to most Cloud Bigtable customers. 
This + feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or deprecation + policy. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["bigtable_table_admin_stub"].CreateTable + return self._stubs["bigtable_table_admin_stub"].CreateTableFromSnapshot @property - def create_table_from_snapshot(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.create_table_from_snapshot`. + def snapshot_table(self): + """Return the gRPC stub for :meth:`BigtableTableAdminClient.snapshot_table`. - Creates a new table from the specified snapshot. The target table must - not exist. The snapshot and the table must be in the same instance. + Creates a new snapshot in the specified cluster from the specified + source table. The cluster and the table must be in the same instance. Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This @@ -160,7 +165,57 @@ def create_table_from_snapshot(self): deserialized request object and returns a deserialized response object. """ - return self._stubs["bigtable_table_admin_stub"].CreateTableFromSnapshot + return self._stubs["bigtable_table_admin_stub"].SnapshotTable + + @property + def create_backup(self): + """Return the gRPC stub for :meth:`BigtableTableAdminClient.create_backup`. + + Starts creating a new Cloud Bigtable Backup. The returned backup + ``long-running operation`` can be used to track creation of the backup. + The ``metadata`` field type is ``CreateBackupMetadata``. The + ``response`` field type is ``Backup``, if successful. Cancelling the + returned operation will stop the creation and delete the backup. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. 
+ """ + return self._stubs["bigtable_table_admin_stub"].CreateBackup + + @property + def restore_table(self): + """Return the gRPC stub for :meth:`BigtableTableAdminClient.restore_table`. + + Create a new table by restoring from a completed backup. The new + table must be in the same instance as the instance containing the + backup. The returned table ``long-running operation`` can be used to + track the progress of the operation, and to cancel it. The ``metadata`` + field type is ``RestoreTableMetadata``. The ``response`` type is + ``Table``, if successful. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_table_admin_stub"].RestoreTable + + @property + def create_table(self): + """Return the gRPC stub for :meth:`BigtableTableAdminClient.create_table`. + + Creates a new table in the specified instance. + The table can be created with a full set of initial column families, + specified in the request. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_table_admin_stub"].CreateTable @property def list_tables(self): @@ -263,68 +318,6 @@ def check_consistency(self): """ return self._stubs["bigtable_table_admin_stub"].CheckConsistency - @property - def get_iam_policy(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.get_iam_policy`. - - Gets the access control policy for a Table or Backup resource. - Returns an empty policy if the resource exists but does not have a policy - set. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. 
- """ - return self._stubs["bigtable_table_admin_stub"].GetIamPolicy - - @property - def set_iam_policy(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.set_iam_policy`. - - Sets the access control policy on a Table or Backup resource. - Replaces any existing policy. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].SetIamPolicy - - @property - def test_iam_permissions(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.test_iam_permissions`. - - Returns permissions that the caller has on the specified Table or Backup resource. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].TestIamPermissions - - @property - def snapshot_table(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.snapshot_table`. - - Creates a new snapshot in the specified cluster from the specified - source table. The cluster and the table must be in the same instance. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].SnapshotTable - @property def get_snapshot(self): """Return the gRPC stub for :meth:`BigtableTableAdminClient.get_snapshot`. 
@@ -383,34 +376,43 @@ def delete_snapshot(self): return self._stubs["bigtable_table_admin_stub"].DeleteSnapshot @property - def create_backup(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.create_backup`. + def get_backup(self): + """Return the gRPC stub for :meth:`BigtableTableAdminClient.get_backup`. - Starts creating a new Cloud Bigtable Backup. The returned backup - ``long-running operation`` can be used to track creation of the backup. - The ``metadata`` field type is ``CreateBackupMetadata``. The - ``response`` field type is ``Backup``, if successful. Cancelling the - returned operation will stop the creation and delete the backup. + Gets metadata on a pending or completed Cloud Bigtable Backup. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["bigtable_table_admin_stub"].CreateBackup + return self._stubs["bigtable_table_admin_stub"].GetBackup @property - def get_backup(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.get_backup`. + def update_backup(self): + """Return the gRPC stub for :meth:`BigtableTableAdminClient.update_backup`. - Gets metadata on a pending or completed Cloud Bigtable Backup. + Updates a pending or completed Cloud Bigtable Backup. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["bigtable_table_admin_stub"].GetBackup + return self._stubs["bigtable_table_admin_stub"].UpdateBackup + + @property + def delete_backup(self): + """Return the gRPC stub for :meth:`BigtableTableAdminClient.delete_backup`. + + Deletes a pending or completed Cloud Bigtable backup. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. 
+ """ + return self._stubs["bigtable_table_admin_stub"].DeleteBackup @property def list_backups(self): @@ -427,45 +429,43 @@ def list_backups(self): return self._stubs["bigtable_table_admin_stub"].ListBackups @property - def update_backup(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.update_backup`. + def get_iam_policy(self): + """Return the gRPC stub for :meth:`BigtableTableAdminClient.get_iam_policy`. - Updates a pending or completed Cloud Bigtable Backup. + Gets the access control policy for a Table or Backup resource. + Returns an empty policy if the resource exists but does not have a policy + set. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["bigtable_table_admin_stub"].UpdateBackup + return self._stubs["bigtable_table_admin_stub"].GetIamPolicy @property - def delete_backup(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.delete_backup`. + def set_iam_policy(self): + """Return the gRPC stub for :meth:`BigtableTableAdminClient.set_iam_policy`. - Deletes a pending or completed Cloud Bigtable backup. + Sets the access control policy on a Table or Backup resource. + Replaces any existing policy. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["bigtable_table_admin_stub"].DeleteBackup + return self._stubs["bigtable_table_admin_stub"].SetIamPolicy @property - def restore_table(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.restore_table`. + def test_iam_permissions(self): + """Return the gRPC stub for :meth:`BigtableTableAdminClient.test_iam_permissions`. - Create a new table by restoring from a completed backup. The new - table must be in the same instance as the instance containing the - backup. 
The returned table ``long-running operation`` can be used to - track the progress of the operation, and to cancel it. The ``metadata`` - field type is ``RestoreTableMetadata``. The ``response`` type is - ``Table``, if successful. + Returns permissions that the caller has on the specified Table or Backup resource. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["bigtable_table_admin_stub"].RestoreTable + return self._stubs["bigtable_table_admin_stub"].TestIamPermissions diff --git a/google/cloud/bigtable_v2/gapic/bigtable_client_config.py b/google/cloud/bigtable_v2/gapic/bigtable_client_config.py index 8a57847bf..965e6b90d 100644 --- a/google/cloud/bigtable_v2/gapic/bigtable_client_config.py +++ b/google/cloud/bigtable_v2/gapic/bigtable_client_config.py @@ -6,73 +6,46 @@ "non_idempotent": [], }, "retry_params": { - "idempotent_params": { - "initial_retry_delay_millis": 10, - "retry_delay_multiplier": 2.0, + "default": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, "max_retry_delay_millis": 60000, "initial_rpc_timeout_millis": 20000, "rpc_timeout_multiplier": 1.0, "max_rpc_timeout_millis": 20000, "total_timeout_millis": 600000, - }, - "non_idempotent_params": { - "initial_retry_delay_millis": 10, - "retry_delay_multiplier": 2.0, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 20000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 20000, - "total_timeout_millis": 20000, - }, - "read_rows_params": { - "initial_retry_delay_millis": 10, - "retry_delay_multiplier": 2.0, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 300000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 300000, - "total_timeout_millis": 43200000, - }, - "mutate_rows_params": { - "initial_retry_delay_millis": 10, - "retry_delay_multiplier": 2.0, - "max_retry_delay_millis": 60000, - 
"initial_rpc_timeout_millis": 60000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 60000, - "total_timeout_millis": 600000, - }, + } }, "methods": { "ReadRows": { - "timeout_millis": 43200000, + "timeout_millis": 60000, "retry_codes_name": "non_idempotent", - "retry_params_name": "read_rows_params", + "retry_params_name": "default", }, "SampleRowKeys": { "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", + "retry_codes_name": "idempotent", + "retry_params_name": "default", }, "MutateRow": { "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", + "retry_codes_name": "non_idempotent", + "retry_params_name": "default", }, "MutateRows": { - "timeout_millis": 600000, + "timeout_millis": 60000, "retry_codes_name": "non_idempotent", - "retry_params_name": "mutate_rows_params", + "retry_params_name": "default", }, "CheckAndMutateRow": { - "timeout_millis": 20000, + "timeout_millis": 60000, "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", + "retry_params_name": "default", }, "ReadModifyWriteRow": { - "timeout_millis": 20000, + "timeout_millis": 60000, "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", + "retry_params_name": "default", }, }, } diff --git a/synth.metadata b/synth.metadata index dc119f8b1..1b0b9dbfe 100644 --- a/synth.metadata +++ b/synth.metadata @@ -11,8 +11,8 @@ "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "ae77417a40098bf64d04047760153f47daf2975f", - "internalRef": "339505822" + "sha": "cbbd3170bcf217e36ae72f4ac522449bf861346f", + "internalRef": "346894665" } }, { diff --git a/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py b/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py index 427e05c9b..626113e52 100644 --- a/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py 
+++ b/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py @@ -128,134 +128,191 @@ def test_create_instance_exception(self): exception = response.exception() assert exception.errors[0] == error - def test_get_instance(self): + def test_partial_update_instance(self): # Setup Expected Response - name_2 = "name2-1052831874" + name = "name3373707" display_name = "displayName1615086568" - expected_response = {"name": name_2, "display_name": display_name} + expected_response = {"name": name, "display_name": display_name} expected_response = instance_pb2.Instance(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_partial_update_instance", done=True + ) + operation.response.Pack(expected_response) # Mock the API response - channel = ChannelStub(responses=[expected_response]) + channel = ChannelStub(responses=[operation]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request - name = client.instance_path("[PROJECT]", "[INSTANCE]") + instance = {} + update_mask = {} - response = client.get_instance(name) - assert expected_response == response + response = client.partial_update_instance(instance, update_mask) + result = response.result() + assert expected_response == result assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.GetInstanceRequest(name=name) + expected_request = bigtable_instance_admin_pb2.PartialUpdateInstanceRequest( + instance=instance, update_mask=update_mask + ) actual_request = channel.requests[0][1] assert expected_request == actual_request - def test_get_instance_exception(self): + def test_partial_update_instance_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_partial_update_instance_exception", done=True + ) + operation.error.CopyFrom(error) + # Mock 
the API response - channel = ChannelStub(responses=[CustomException()]) + channel = ChannelStub(responses=[operation]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableInstanceAdminClient() - # Setup request - name = client.instance_path("[PROJECT]", "[INSTANCE]") + # Setup Request + instance = {} + update_mask = {} - with pytest.raises(CustomException): - client.get_instance(name) + response = client.partial_update_instance(instance, update_mask) + exception = response.exception() + assert exception.errors[0] == error - def test_list_instances(self): + def test_create_cluster(self): # Setup Expected Response - next_page_token = "nextPageToken-1530815211" - expected_response = {"next_page_token": next_page_token} - expected_response = bigtable_instance_admin_pb2.ListInstancesResponse( - **expected_response + name = "name3373707" + location = "location1901043637" + serve_nodes = 1288838783 + expected_response = { + "name": name, + "location": location, + "serve_nodes": serve_nodes, + } + expected_response = instance_pb2.Cluster(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_create_cluster", done=True ) + operation.response.Pack(expected_response) # Mock the API response - channel = ChannelStub(responses=[expected_response]) + channel = ChannelStub(responses=[operation]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request - parent = client.project_path("[PROJECT]") + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + cluster_id = "clusterId240280960" + cluster = {} - response = client.list_instances(parent) - assert expected_response == response + response = client.create_cluster(parent, cluster_id, cluster) + result = response.result() + assert 
expected_response == result assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.ListInstancesRequest( - parent=parent + expected_request = bigtable_instance_admin_pb2.CreateClusterRequest( + parent=parent, cluster_id=cluster_id, cluster=cluster ) actual_request = channel.requests[0][1] assert expected_request == actual_request - def test_list_instances_exception(self): + def test_create_cluster_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_create_cluster_exception", done=True + ) + operation.error.CopyFrom(error) + # Mock the API response - channel = ChannelStub(responses=[CustomException()]) + channel = ChannelStub(responses=[operation]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableInstanceAdminClient() - # Setup request - parent = client.project_path("[PROJECT]") + # Setup Request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + cluster_id = "clusterId240280960" + cluster = {} - with pytest.raises(CustomException): - client.list_instances(parent) + response = client.create_cluster(parent, cluster_id, cluster) + exception = response.exception() + assert exception.errors[0] == error - def test_update_instance(self): + def test_update_cluster(self): # Setup Expected Response name = "name3373707" - display_name_2 = "displayName21615000987" - expected_response = {"name": name, "display_name": display_name_2} - expected_response = instance_pb2.Instance(**expected_response) + location = "location1901043637" + serve_nodes_2 = 1623486220 + expected_response = { + "name": name, + "location": location, + "serve_nodes": serve_nodes_2, + } + expected_response = instance_pb2.Cluster(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_update_cluster", done=True + ) + 
operation.response.Pack(expected_response) # Mock the API response - channel = ChannelStub(responses=[expected_response]) + channel = ChannelStub(responses=[operation]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request - display_name = "displayName1615086568" + serve_nodes = 1288838783 - response = client.update_instance(display_name) - assert expected_response == response + response = client.update_cluster(serve_nodes) + result = response.result() + assert expected_response == result assert len(channel.requests) == 1 - expected_request = instance_pb2.Instance(display_name=display_name) + expected_request = instance_pb2.Cluster(serve_nodes=serve_nodes) actual_request = channel.requests[0][1] assert expected_request == actual_request - def test_update_instance_exception(self): + def test_update_cluster_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_update_cluster_exception", done=True + ) + operation.error.CopyFrom(error) + # Mock the API response - channel = ChannelStub(responses=[CustomException()]) + channel = ChannelStub(responses=[operation]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableInstanceAdminClient() - # Setup request - display_name = "displayName1615086568" + # Setup Request + serve_nodes = 1288838783 - with pytest.raises(CustomException): - client.update_instance(display_name) + response = client.update_cluster(serve_nodes) + exception = response.exception() + assert exception.errors[0] == error - def test_partial_update_instance(self): + def test_update_app_profile(self): # Setup Expected Response name = "name3373707" - display_name = "displayName1615086568" - expected_response = {"name": 
name, "display_name": display_name} - expected_response = instance_pb2.Instance(**expected_response) + etag = "etag3123477" + description = "description-1724546052" + expected_response = {"name": name, "etag": etag, "description": description} + expected_response = instance_pb2.AppProfile(**expected_response) operation = operations_pb2.Operation( - name="operations/test_partial_update_instance", done=True + name="operations/test_update_app_profile", done=True ) operation.response.Pack(expected_response) @@ -267,25 +324,25 @@ def test_partial_update_instance(self): client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request - instance = {} + app_profile = {} update_mask = {} - response = client.partial_update_instance(instance, update_mask) + response = client.update_app_profile(app_profile, update_mask) result = response.result() assert expected_response == result assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.PartialUpdateInstanceRequest( - instance=instance, update_mask=update_mask + expected_request = bigtable_instance_admin_pb2.UpdateAppProfileRequest( + app_profile=app_profile, update_mask=update_mask ) actual_request = channel.requests[0][1] assert expected_request == actual_request - def test_partial_update_instance_exception(self): + def test_update_app_profile_exception(self): # Setup Response error = status_pb2.Status() operation = operations_pb2.Operation( - name="operations/test_partial_update_instance_exception", done=True + name="operations/test_update_app_profile_exception", done=True ) operation.error.CopyFrom(error) @@ -297,15 +354,22 @@ def test_partial_update_instance_exception(self): client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request - instance = {} + app_profile = {} update_mask = {} - response = client.partial_update_instance(instance, update_mask) + response = client.update_app_profile(app_profile, update_mask) exception = response.exception() assert exception.errors[0] == 
error - def test_delete_instance(self): - channel = ChannelStub() + def test_get_instance(self): + # Setup Expected Response + name_2 = "name2-1052831874" + display_name = "displayName1615086568" + expected_response = {"name": name_2, "display_name": display_name} + expected_response = instance_pb2.Instance(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel @@ -314,14 +378,15 @@ def test_delete_instance(self): # Setup Request name = client.instance_path("[PROJECT]", "[INSTANCE]") - client.delete_instance(name) + response = client.get_instance(name) + assert expected_response == response assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.DeleteInstanceRequest(name=name) + expected_request = bigtable_instance_admin_pb2.GetInstanceRequest(name=name) actual_request = channel.requests[0][1] assert expected_request == actual_request - def test_delete_instance_exception(self): + def test_get_instance_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") @@ -333,70 +398,119 @@ def test_delete_instance_exception(self): name = client.instance_path("[PROJECT]", "[INSTANCE]") with pytest.raises(CustomException): - client.delete_instance(name) + client.get_instance(name) - def test_create_cluster(self): + def test_list_instances(self): # Setup Expected Response - name = "name3373707" - location = "location1901043637" - serve_nodes = 1288838783 - expected_response = { - "name": name, - "location": location, - "serve_nodes": serve_nodes, - } - expected_response = instance_pb2.Cluster(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_create_cluster", done=True + next_page_token = "nextPageToken-1530815211" + expected_response = 
{"next_page_token": next_page_token} + expected_response = bigtable_instance_admin_pb2.ListInstancesResponse( + **expected_response ) - operation.response.Pack(expected_response) # Mock the API response - channel = ChannelStub(responses=[operation]) + channel = ChannelStub(responses=[expected_response]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - cluster_id = "clusterId240280960" - cluster = {} + parent = client.project_path("[PROJECT]") - response = client.create_cluster(parent, cluster_id, cluster) - result = response.result() - assert expected_response == result + response = client.list_instances(parent) + assert expected_response == response assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.CreateClusterRequest( - parent=parent, cluster_id=cluster_id, cluster=cluster + expected_request = bigtable_instance_admin_pb2.ListInstancesRequest( + parent=parent ) actual_request = channel.requests[0][1] assert expected_request == actual_request - def test_create_cluster_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_create_cluster_exception", done=True - ) - operation.error.CopyFrom(error) + def test_list_instances_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup request + parent = client.project_path("[PROJECT]") + + with pytest.raises(CustomException): + client.list_instances(parent) + + def test_update_instance(self): + # Setup Expected Response + name = "name3373707" + 
display_name_2 = "displayName21615000987" + expected_response = {"name": name, "display_name": display_name_2} + expected_response = instance_pb2.Instance(**expected_response) # Mock the API response - channel = ChannelStub(responses=[operation]) + channel = ChannelStub(responses=[expected_response]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - cluster_id = "clusterId240280960" - cluster = {} + display_name = "displayName1615086568" - response = client.create_cluster(parent, cluster_id, cluster) - exception = response.exception() - assert exception.errors[0] == error + response = client.update_instance(display_name) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = instance_pb2.Instance(display_name=display_name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_update_instance_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup request + display_name = "displayName1615086568" + + with pytest.raises(CustomException): + client.update_instance(display_name) + + def test_delete_instance(self): + channel = ChannelStub() + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup Request + name = client.instance_path("[PROJECT]", "[INSTANCE]") + + client.delete_instance(name) + + assert len(channel.requests) == 1 + expected_request = 
bigtable_instance_admin_pb2.DeleteInstanceRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_delete_instance_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Setup request + name = client.instance_path("[PROJECT]", "[INSTANCE]") + + with pytest.raises(CustomException): + client.delete_instance(name) def test_get_cluster(self): # Setup Expected Response @@ -484,63 +598,6 @@ def test_list_clusters_exception(self): with pytest.raises(CustomException): client.list_clusters(parent) - def test_update_cluster(self): - # Setup Expected Response - name = "name3373707" - location = "location1901043637" - serve_nodes_2 = 1623486220 - expected_response = { - "name": name, - "location": location, - "serve_nodes": serve_nodes_2, - } - expected_response = instance_pb2.Cluster(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_update_cluster", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - serve_nodes = 1288838783 - - response = client.update_cluster(serve_nodes) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = instance_pb2.Cluster(serve_nodes=serve_nodes) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_cluster_exception(self): - # Setup Response - error = status_pb2.Status() - operation = 
operations_pb2.Operation( - name="operations/test_update_cluster_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - serve_nodes = 1288838783 - - response = client.update_cluster(serve_nodes) - exception = response.exception() - assert exception.errors[0] == error - def test_delete_cluster(self): channel = ChannelStub() patch = mock.patch("google.api_core.grpc_helpers.create_channel") @@ -708,63 +765,6 @@ def test_list_app_profiles_exception(self): with pytest.raises(CustomException): list(paged_list_response) - def test_update_app_profile(self): - # Setup Expected Response - name = "name3373707" - etag = "etag3123477" - description = "description-1724546052" - expected_response = {"name": name, "etag": etag, "description": description} - expected_response = instance_pb2.AppProfile(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_update_app_profile", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - app_profile = {} - update_mask = {} - - response = client.update_app_profile(app_profile, update_mask) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.UpdateAppProfileRequest( - app_profile=app_profile, update_mask=update_mask - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def 
test_update_app_profile_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_update_app_profile_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - app_profile = {} - update_mask = {} - - response = client.update_app_profile(app_profile, update_mask) - exception = response.exception() - assert exception.errors[0] == error - def test_delete_app_profile(self): channel = ChannelStub() patch = mock.patch("google.api_core.grpc_helpers.create_channel") diff --git a/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py b/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py index 48e67ae22..2ca0a34f9 100644 --- a/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py +++ b/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py @@ -68,14 +68,18 @@ class CustomException(Exception): class TestBigtableTableAdminClient(object): - def test_create_table(self): + def test_create_table_from_snapshot(self): # Setup Expected Response name = "name3373707" expected_response = {"name": name} expected_response = table_pb2.Table(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_create_table_from_snapshot", done=True + ) + operation.response.Pack(expected_response) # Mock the API response - channel = ChannelStub(responses=[expected_response]) + channel = ChannelStub(responses=[operation]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel @@ -84,41 +88,180 @@ def test_create_table(self): # Setup Request parent = client.instance_path("[PROJECT]", "[INSTANCE]") table_id = 
"tableId-895419604" - table = {} + source_snapshot = client.snapshot_path( + "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" + ) - response = client.create_table(parent, table_id, table) - assert expected_response == response + response = client.create_table_from_snapshot(parent, table_id, source_snapshot) + result = response.result() + assert expected_response == result assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.CreateTableRequest( - parent=parent, table_id=table_id, table=table + expected_request = bigtable_table_admin_pb2.CreateTableFromSnapshotRequest( + parent=parent, table_id=table_id, source_snapshot=source_snapshot ) actual_request = channel.requests[0][1] assert expected_request == actual_request - def test_create_table_exception(self): + def test_create_table_from_snapshot_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_create_table_from_snapshot_exception", done=True + ) + operation.error.CopyFrom(error) + # Mock the API response - channel = ChannelStub(responses=[CustomException()]) + channel = ChannelStub(responses=[operation]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableTableAdminClient() - # Setup request + # Setup Request parent = client.instance_path("[PROJECT]", "[INSTANCE]") table_id = "tableId-895419604" - table = {} + source_snapshot = client.snapshot_path( + "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" + ) - with pytest.raises(CustomException): - client.create_table(parent, table_id, table) + response = client.create_table_from_snapshot(parent, table_id, source_snapshot) + exception = response.exception() + assert exception.errors[0] == error - def test_create_table_from_snapshot(self): + def test_snapshot_table(self): + # Setup Expected Response + name_2 = "name2-1052831874" + 
data_size_bytes = 2110122398 + description = "description-1724546052" + expected_response = { + "name": name_2, + "data_size_bytes": data_size_bytes, + "description": description, + } + expected_response = table_pb2.Snapshot(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_snapshot_table", done=True + ) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + cluster = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") + snapshot_id = "snapshotId-168585866" + + response = client.snapshot_table(name, cluster, snapshot_id) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.SnapshotTableRequest( + name=name, cluster=cluster, snapshot_id=snapshot_id + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_snapshot_table_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_snapshot_table_exception", done=True + ) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + cluster = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") + snapshot_id = "snapshotId-168585866" + + response = client.snapshot_table(name, cluster, snapshot_id) 
+ exception = response.exception() + assert exception.errors[0] == error + + def test_create_backup(self): + # Setup Expected Response + name = "name3373707" + source_table = "sourceTable1670858410" + size_bytes = 1796325715 + expected_response = { + "name": name, + "source_table": source_table, + "size_bytes": size_bytes, + } + expected_response = table_pb2.Backup(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_create_backup", done=True + ) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") + backup_id = "backupId1355353272" + backup = {} + + response = client.create_backup(parent, backup_id, backup) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.CreateBackupRequest( + parent=parent, backup_id=backup_id, backup=backup + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_create_backup_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_create_backup_exception", done=True + ) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") + backup_id = "backupId1355353272" + backup = {} + + response = 
client.create_backup(parent, backup_id, backup) + exception = response.exception() + assert exception.errors[0] == error + + def test_restore_table(self): # Setup Expected Response name = "name3373707" expected_response = {"name": name} expected_response = table_pb2.Table(**expected_response) operation = operations_pb2.Operation( - name="operations/test_create_table_from_snapshot", done=True + name="operations/test_restore_table", done=True ) operation.response.Pack(expected_response) @@ -132,26 +275,23 @@ def test_create_table_from_snapshot(self): # Setup Request parent = client.instance_path("[PROJECT]", "[INSTANCE]") table_id = "tableId-895419604" - source_snapshot = client.snapshot_path( - "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" - ) - response = client.create_table_from_snapshot(parent, table_id, source_snapshot) + response = client.restore_table(parent, table_id) result = response.result() assert expected_response == result assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.CreateTableFromSnapshotRequest( - parent=parent, table_id=table_id, source_snapshot=source_snapshot + expected_request = bigtable_table_admin_pb2.RestoreTableRequest( + parent=parent, table_id=table_id ) actual_request = channel.requests[0][1] assert expected_request == actual_request - def test_create_table_from_snapshot_exception(self): + def test_restore_table_exception(self): # Setup Response error = status_pb2.Status() operation = operations_pb2.Operation( - name="operations/test_create_table_from_snapshot_exception", done=True + name="operations/test_restore_table_exception", done=True ) operation.error.CopyFrom(error) @@ -165,14 +305,55 @@ def test_create_table_from_snapshot_exception(self): # Setup Request parent = client.instance_path("[PROJECT]", "[INSTANCE]") table_id = "tableId-895419604" - source_snapshot = client.snapshot_path( - "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" - ) - response = 
client.create_table_from_snapshot(parent, table_id, source_snapshot) + response = client.restore_table(parent, table_id) exception = response.exception() assert exception.errors[0] == error + def test_create_table(self): + # Setup Expected Response + name = "name3373707" + expected_response = {"name": name} + expected_response = table_pb2.Table(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + table_id = "tableId-895419604" + table = {} + + response = client.create_table(parent, table_id, table) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.CreateTableRequest( + parent=parent, table_id=table_id, table=table + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_create_table_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + table_id = "tableId-895419604" + table = {} + + with pytest.raises(CustomException): + client.create_table(parent, table_id, table) + def test_list_tables(self): # Setup Expected Response next_page_token = "" @@ -446,12 +627,17 @@ def test_check_consistency_exception(self): with pytest.raises(CustomException): client.check_consistency(name, consistency_token) - def test_get_iam_policy(self): + def test_get_snapshot(self): # Setup Expected Response - version = 351608024 - etag = 
b"21" - expected_response = {"version": version, "etag": etag} - expected_response = policy_pb2.Policy(**expected_response) + name_2 = "name2-1052831874" + data_size_bytes = 2110122398 + description = "description-1724546052" + expected_response = { + "name": name_2, + "data_size_bytes": data_size_bytes, + "description": description, + } + expected_response = table_pb2.Snapshot(**expected_response) # Mock the API response channel = ChannelStub(responses=[expected_response]) @@ -461,204 +647,11 @@ def test_get_iam_policy(self): client = bigtable_admin_v2.BigtableTableAdminClient() # Setup Request - resource = "resource-341064690" + name = client.snapshot_path( + "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" + ) - response = client.get_iam_policy(resource) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = iam_policy_pb2.GetIamPolicyRequest(resource=resource) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_iam_policy_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - resource = "resource-341064690" - - with pytest.raises(CustomException): - client.get_iam_policy(resource) - - def test_set_iam_policy(self): - # Setup Expected Response - version = 351608024 - etag = b"21" - expected_response = {"version": version, "etag": etag} - expected_response = policy_pb2.Policy(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - resource = 
"resource-341064690" - policy = {} - - response = client.set_iam_policy(resource, policy) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = iam_policy_pb2.SetIamPolicyRequest( - resource=resource, policy=policy - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_set_iam_policy_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - resource = "resource-341064690" - policy = {} - - with pytest.raises(CustomException): - client.set_iam_policy(resource, policy) - - def test_test_iam_permissions(self): - # Setup Expected Response - expected_response = {} - expected_response = iam_policy_pb2.TestIamPermissionsResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - resource = "resource-341064690" - permissions = [] - - response = client.test_iam_permissions(resource, permissions) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = iam_policy_pb2.TestIamPermissionsRequest( - resource=resource, permissions=permissions - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_test_iam_permissions_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = 
bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - resource = "resource-341064690" - permissions = [] - - with pytest.raises(CustomException): - client.test_iam_permissions(resource, permissions) - - def test_snapshot_table(self): - # Setup Expected Response - name_2 = "name2-1052831874" - data_size_bytes = 2110122398 - description = "description-1724546052" - expected_response = { - "name": name_2, - "data_size_bytes": data_size_bytes, - "description": description, - } - expected_response = table_pb2.Snapshot(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_snapshot_table", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - cluster = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - snapshot_id = "snapshotId-168585866" - - response = client.snapshot_table(name, cluster, snapshot_id) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.SnapshotTableRequest( - name=name, cluster=cluster, snapshot_id=snapshot_id - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_snapshot_table_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_snapshot_table_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = 
bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - cluster = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - snapshot_id = "snapshotId-168585866" - - response = client.snapshot_table(name, cluster, snapshot_id) - exception = response.exception() - assert exception.errors[0] == error - - def test_get_snapshot(self): - # Setup Expected Response - name_2 = "name2-1052831874" - data_size_bytes = 2110122398 - description = "description-1724546052" - expected_response = { - "name": name_2, - "data_size_bytes": data_size_bytes, - "description": description, - } - expected_response = table_pb2.Snapshot(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.snapshot_path( - "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" - ) - - response = client.get_snapshot(name) + response = client.get_snapshot(name) assert expected_response == response assert len(channel.requests) == 1 @@ -762,76 +755,57 @@ def test_delete_snapshot_exception(self): with pytest.raises(CustomException): client.delete_snapshot(name) - def test_create_backup(self): + def test_get_backup(self): # Setup Expected Response - name = "name3373707" + name_2 = "name2-1052831874" source_table = "sourceTable1670858410" size_bytes = 1796325715 expected_response = { - "name": name, + "name": name_2, "source_table": source_table, "size_bytes": size_bytes, } expected_response = table_pb2.Backup(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_create_backup", done=True - ) - operation.response.Pack(expected_response) # Mock the API response - channel = ChannelStub(responses=[operation]) + channel = 
ChannelStub(responses=[expected_response]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableTableAdminClient() # Setup Request - parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - backup_id = "backupId1355353272" - backup = {} + name = client.backup_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[BACKUP]") - response = client.create_backup(parent, backup_id, backup) - result = response.result() - assert expected_response == result + response = client.get_backup(name) + assert expected_response == response assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.CreateBackupRequest( - parent=parent, backup_id=backup_id, backup=backup - ) + expected_request = bigtable_table_admin_pb2.GetBackupRequest(name=name) actual_request = channel.requests[0][1] assert expected_request == actual_request - def test_create_backup_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_create_backup_exception", done=True - ) - operation.error.CopyFrom(error) - + def test_get_backup_exception(self): # Mock the API response - channel = ChannelStub(responses=[operation]) + channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableTableAdminClient() - # Setup Request - parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - backup_id = "backupId1355353272" - backup = {} + # Setup request + name = client.backup_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[BACKUP]") - response = client.create_backup(parent, backup_id, backup) - exception = response.exception() - assert exception.errors[0] == error + with pytest.raises(CustomException): + client.get_backup(name) - def 
test_get_backup(self): + def test_update_backup(self): # Setup Expected Response - name_2 = "name2-1052831874" + name = "name3373707" source_table = "sourceTable1670858410" size_bytes = 1796325715 expected_response = { - "name": name_2, + "name": name, "source_table": source_table, "size_bytes": size_bytes, } @@ -845,17 +819,52 @@ def test_get_backup(self): client = bigtable_admin_v2.BigtableTableAdminClient() # Setup Request - name = client.backup_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[BACKUP]") + backup = {} + update_mask = {} - response = client.get_backup(name) + response = client.update_backup(backup, update_mask) assert expected_response == response assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.GetBackupRequest(name=name) + expected_request = bigtable_table_admin_pb2.UpdateBackupRequest( + backup=backup, update_mask=update_mask + ) actual_request = channel.requests[0][1] assert expected_request == actual_request - def test_get_backup_exception(self): + def test_update_backup_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + backup = {} + update_mask = {} + + with pytest.raises(CustomException): + client.update_backup(backup, update_mask) + + def test_delete_backup(self): + channel = ChannelStub() + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + name = client.backup_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[BACKUP]") + + client.delete_backup(name) + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.DeleteBackupRequest(name=name) + actual_request = 
channel.requests[0][1] + assert expected_request == actual_request + + def test_delete_backup_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") @@ -867,7 +876,7 @@ def test_get_backup_exception(self): name = client.backup_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[BACKUP]") with pytest.raises(CustomException): - client.get_backup(name) + client.delete_backup(name) def test_list_backups(self): # Setup Expected Response @@ -914,17 +923,12 @@ def test_list_backups_exception(self): with pytest.raises(CustomException): list(paged_list_response) - def test_update_backup(self): + def test_get_iam_policy(self): # Setup Expected Response - name = "name3373707" - source_table = "sourceTable1670858410" - size_bytes = 1796325715 - expected_response = { - "name": name, - "source_table": source_table, - "size_bytes": size_bytes, - } - expected_response = table_pb2.Backup(**expected_response) + version = 351608024 + etag = b"21" + expected_response = {"version": version, "etag": etag} + expected_response = policy_pb2.Policy(**expected_response) # Mock the API response channel = ChannelStub(responses=[expected_response]) @@ -934,20 +938,17 @@ def test_update_backup(self): client = bigtable_admin_v2.BigtableTableAdminClient() # Setup Request - backup = {} - update_mask = {} + resource = "resource-341064690" - response = client.update_backup(backup, update_mask) + response = client.get_iam_policy(resource) assert expected_response == response assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.UpdateBackupRequest( - backup=backup, update_mask=update_mask - ) + expected_request = iam_policy_pb2.GetIamPolicyRequest(resource=resource) actual_request = channel.requests[0][1] assert expected_request == actual_request - def test_update_backup_exception(self): + def test_get_iam_policy_exception(self): # Mock the API response channel = 
ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") @@ -956,30 +957,40 @@ def test_update_backup_exception(self): client = bigtable_admin_v2.BigtableTableAdminClient() # Setup request - backup = {} - update_mask = {} + resource = "resource-341064690" with pytest.raises(CustomException): - client.update_backup(backup, update_mask) + client.get_iam_policy(resource) - def test_delete_backup(self): - channel = ChannelStub() + def test_set_iam_policy(self): + # Setup Expected Response + version = 351608024 + etag = b"21" + expected_response = {"version": version, "etag": etag} + expected_response = policy_pb2.Policy(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableTableAdminClient() # Setup Request - name = client.backup_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[BACKUP]") + resource = "resource-341064690" + policy = {} - client.delete_backup(name) + response = client.set_iam_policy(resource, policy) + assert expected_response == response assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.DeleteBackupRequest(name=name) + expected_request = iam_policy_pb2.SetIamPolicyRequest( + resource=resource, policy=policy + ) actual_request = channel.requests[0][1] assert expected_request == actual_request - def test_delete_backup_exception(self): + def test_set_iam_policy_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") @@ -988,62 +999,51 @@ def test_delete_backup_exception(self): client = bigtable_admin_v2.BigtableTableAdminClient() # Setup request - name = client.backup_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[BACKUP]") + resource = "resource-341064690" + 
policy = {} with pytest.raises(CustomException): - client.delete_backup(name) + client.set_iam_policy(resource, policy) - def test_restore_table(self): + def test_test_iam_permissions(self): # Setup Expected Response - name = "name3373707" - expected_response = {"name": name} - expected_response = table_pb2.Table(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_restore_table", done=True + expected_response = {} + expected_response = iam_policy_pb2.TestIamPermissionsResponse( + **expected_response ) - operation.response.Pack(expected_response) # Mock the API response - channel = ChannelStub(responses=[operation]) + channel = ChannelStub(responses=[expected_response]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableTableAdminClient() # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - table_id = "tableId-895419604" + resource = "resource-341064690" + permissions = [] - response = client.restore_table(parent, table_id) - result = response.result() - assert expected_response == result + response = client.test_iam_permissions(resource, permissions) + assert expected_response == response assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.RestoreTableRequest( - parent=parent, table_id=table_id + expected_request = iam_policy_pb2.TestIamPermissionsRequest( + resource=resource, permissions=permissions ) actual_request = channel.requests[0][1] assert expected_request == actual_request - def test_restore_table_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_restore_table_exception", done=True - ) - operation.error.CopyFrom(error) - + def test_test_iam_permissions_exception(self): # Mock the API response - channel = ChannelStub(responses=[operation]) + channel = 
ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableTableAdminClient() - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - table_id = "tableId-895419604" + # Setup request + resource = "resource-341064690" + permissions = [] - response = client.restore_table(parent, table_id) - exception = response.exception() - assert exception.errors[0] == error + with pytest.raises(CustomException): + client.test_iam_permissions(resource, permissions) From 10c6166018e135f31d6ec3453ff4ae8500e425b0 Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Sat, 13 Feb 2021 06:19:02 -0800 Subject: [PATCH 10/11] chore: migrate bigtable to the Python microgenerator PiperOrigin-RevId: 356992836 Source-Author: Google APIs Source-Date: Thu Feb 11 09:33:53 2021 -0800 Source-Repo: googleapis/googleapis Source-Sha: 1e0c7413684ca6f6322620ecfc0d3e0352933dc1 Source-Link: https://github.com/googleapis/googleapis/commit/1e0c7413684ca6f6322620ecfc0d3e0352933dc1 --- google/cloud/bigtable_admin_v2/__init__.py | 169 +- .../cloud/bigtable_admin_v2/gapic/__init__.py | 0 .../gapic/bigtable_instance_admin_client.py | 1894 --------- .../bigtable_instance_admin_client_config.py | 118 - .../gapic/bigtable_table_admin_client.py | 2297 ----------- .../bigtable_table_admin_client_config.py | 133 - google/cloud/bigtable_admin_v2/gapic/enums.py | 213 - .../gapic/transports/__init__.py | 0 .../bigtable_instance_admin_grpc_transport.py | 380 -- .../bigtable_table_admin_grpc_transport.py | 471 --- .../cloud/bigtable_admin_v2/proto/__init__.py | 0 .../proto/bigtable_instance_admin_pb2.py | 2434 ----------- .../proto/bigtable_instance_admin_pb2_grpc.py | 933 ----- .../proto/bigtable_table_admin_pb2.py | 3577 ----------------- .../proto/bigtable_table_admin_pb2_grpc.py | 1134 ------ .../bigtable_admin_v2/proto/common_pb2.py | 188 - 
.../proto/common_pb2_grpc.py | 3 - .../bigtable_admin_v2/proto/instance_pb2.py | 886 ---- .../proto/instance_pb2_grpc.py | 3 - .../bigtable_admin_v2/proto/table_pb2.py | 1682 -------- .../bigtable_admin_v2/proto/table_pb2_grpc.py | 3 - .../bigtable_instance_admin/client.py | 17 +- .../bigtable_instance_admin/pagers.py | 11 +- .../services/bigtable_table_admin/client.py | 17 +- .../services/bigtable_table_admin/pagers.py | 11 +- google/cloud/bigtable_admin_v2/types.py | 76 - google/cloud/bigtable_v2/__init__.py | 75 +- google/cloud/bigtable_v2/gapic/__init__.py | 0 .../bigtable_v2/gapic/bigtable_client.py | 771 ---- .../gapic/bigtable_client_config.py | 53 - .../bigtable_v2/gapic/transports/__init__.py | 0 .../transports/bigtable_grpc_transport.py | 207 - google/cloud/bigtable_v2/proto/__init__.py | 0 .../cloud/bigtable_v2/proto/bigtable_pb2.py | 1798 --------- .../bigtable_v2/proto/bigtable_pb2_grpc.py | 329 -- google/cloud/bigtable_v2/proto/data_pb2.py | 2668 ------------ .../cloud/bigtable_v2/proto/data_pb2_grpc.py | 3 - .../bigtable_v2/services/bigtable/client.py | 24 +- google/cloud/bigtable_v2/types.py | 54 - synth.metadata | 82 +- .../unit/gapic/v2/test_bigtable_client_v2.py | 316 -- .../test_bigtable_instance_admin_client_v2.py | 926 ----- .../v2/test_bigtable_table_admin_client_v2.py | 1049 ----- 43 files changed, 289 insertions(+), 24716 deletions(-) delete mode 100644 google/cloud/bigtable_admin_v2/gapic/__init__.py delete mode 100644 google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py delete mode 100644 google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py delete mode 100644 google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py delete mode 100644 google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py delete mode 100644 google/cloud/bigtable_admin_v2/gapic/enums.py delete mode 100644 google/cloud/bigtable_admin_v2/gapic/transports/__init__.py delete mode 100644 
google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py delete mode 100644 google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py delete mode 100644 google/cloud/bigtable_admin_v2/proto/__init__.py delete mode 100644 google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py delete mode 100644 google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py delete mode 100644 google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py delete mode 100644 google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py delete mode 100644 google/cloud/bigtable_admin_v2/proto/common_pb2.py delete mode 100644 google/cloud/bigtable_admin_v2/proto/common_pb2_grpc.py delete mode 100644 google/cloud/bigtable_admin_v2/proto/instance_pb2.py delete mode 100644 google/cloud/bigtable_admin_v2/proto/instance_pb2_grpc.py delete mode 100644 google/cloud/bigtable_admin_v2/proto/table_pb2.py delete mode 100644 google/cloud/bigtable_admin_v2/proto/table_pb2_grpc.py delete mode 100644 google/cloud/bigtable_admin_v2/types.py delete mode 100644 google/cloud/bigtable_v2/gapic/__init__.py delete mode 100644 google/cloud/bigtable_v2/gapic/bigtable_client.py delete mode 100644 google/cloud/bigtable_v2/gapic/bigtable_client_config.py delete mode 100644 google/cloud/bigtable_v2/gapic/transports/__init__.py delete mode 100644 google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py delete mode 100644 google/cloud/bigtable_v2/proto/__init__.py delete mode 100644 google/cloud/bigtable_v2/proto/bigtable_pb2.py delete mode 100644 google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py delete mode 100644 google/cloud/bigtable_v2/proto/data_pb2.py delete mode 100644 google/cloud/bigtable_v2/proto/data_pb2_grpc.py delete mode 100644 google/cloud/bigtable_v2/types.py delete mode 100644 tests/unit/gapic/v2/test_bigtable_client_v2.py delete mode 100644 
tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py delete mode 100644 tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py diff --git a/google/cloud/bigtable_admin_v2/__init__.py b/google/cloud/bigtable_admin_v2/__init__.py index 9f72d4f53..cf8e394ef 100644 --- a/google/cloud/bigtable_admin_v2/__init__.py +++ b/google/cloud/bigtable_admin_v2/__init__.py @@ -1,54 +1,153 @@ # -*- coding: utf-8 -*- -# + # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# https://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +# - -from __future__ import absolute_import -import sys -import warnings - -from google.cloud.bigtable_admin_v2 import types -from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client -from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client -from google.cloud.bigtable_admin_v2.gapic import enums - - -if sys.version_info[:2] == (2, 7): - message = ( - "A future version of this library will drop support for Python 2.7. 
" - "More details about Python 2 support for Google Cloud Client Libraries " - "can be found at https://cloud.google.com/python/docs/python2-sunset/" - ) - warnings.warn(message, DeprecationWarning) - - -class BigtableInstanceAdminClient( - bigtable_instance_admin_client.BigtableInstanceAdminClient -): - __doc__ = bigtable_instance_admin_client.BigtableInstanceAdminClient.__doc__ - enums = enums - - -class BigtableTableAdminClient(bigtable_table_admin_client.BigtableTableAdminClient): - __doc__ = bigtable_table_admin_client.BigtableTableAdminClient.__doc__ - enums = enums +from .services.bigtable_instance_admin import BigtableInstanceAdminClient +from .services.bigtable_table_admin import BigtableTableAdminClient +from .types.bigtable_instance_admin import CreateAppProfileRequest +from .types.bigtable_instance_admin import CreateClusterMetadata +from .types.bigtable_instance_admin import CreateClusterRequest +from .types.bigtable_instance_admin import CreateInstanceMetadata +from .types.bigtable_instance_admin import CreateInstanceRequest +from .types.bigtable_instance_admin import DeleteAppProfileRequest +from .types.bigtable_instance_admin import DeleteClusterRequest +from .types.bigtable_instance_admin import DeleteInstanceRequest +from .types.bigtable_instance_admin import GetAppProfileRequest +from .types.bigtable_instance_admin import GetClusterRequest +from .types.bigtable_instance_admin import GetInstanceRequest +from .types.bigtable_instance_admin import ListAppProfilesRequest +from .types.bigtable_instance_admin import ListAppProfilesResponse +from .types.bigtable_instance_admin import ListClustersRequest +from .types.bigtable_instance_admin import ListClustersResponse +from .types.bigtable_instance_admin import ListInstancesRequest +from .types.bigtable_instance_admin import ListInstancesResponse +from .types.bigtable_instance_admin import PartialUpdateInstanceRequest +from .types.bigtable_instance_admin import UpdateAppProfileMetadata +from 
.types.bigtable_instance_admin import UpdateAppProfileRequest +from .types.bigtable_instance_admin import UpdateClusterMetadata +from .types.bigtable_instance_admin import UpdateInstanceMetadata +from .types.bigtable_table_admin import CheckConsistencyRequest +from .types.bigtable_table_admin import CheckConsistencyResponse +from .types.bigtable_table_admin import CreateBackupMetadata +from .types.bigtable_table_admin import CreateBackupRequest +from .types.bigtable_table_admin import CreateTableFromSnapshotMetadata +from .types.bigtable_table_admin import CreateTableFromSnapshotRequest +from .types.bigtable_table_admin import CreateTableRequest +from .types.bigtable_table_admin import DeleteBackupRequest +from .types.bigtable_table_admin import DeleteSnapshotRequest +from .types.bigtable_table_admin import DeleteTableRequest +from .types.bigtable_table_admin import DropRowRangeRequest +from .types.bigtable_table_admin import GenerateConsistencyTokenRequest +from .types.bigtable_table_admin import GenerateConsistencyTokenResponse +from .types.bigtable_table_admin import GetBackupRequest +from .types.bigtable_table_admin import GetSnapshotRequest +from .types.bigtable_table_admin import GetTableRequest +from .types.bigtable_table_admin import ListBackupsRequest +from .types.bigtable_table_admin import ListBackupsResponse +from .types.bigtable_table_admin import ListSnapshotsRequest +from .types.bigtable_table_admin import ListSnapshotsResponse +from .types.bigtable_table_admin import ListTablesRequest +from .types.bigtable_table_admin import ListTablesResponse +from .types.bigtable_table_admin import ModifyColumnFamiliesRequest +from .types.bigtable_table_admin import OptimizeRestoredTableMetadata +from .types.bigtable_table_admin import RestoreTableMetadata +from .types.bigtable_table_admin import RestoreTableRequest +from .types.bigtable_table_admin import SnapshotTableMetadata +from .types.bigtable_table_admin import SnapshotTableRequest +from 
.types.bigtable_table_admin import UpdateBackupRequest +from .types.common import OperationProgress +from .types.common import StorageType +from .types.instance import AppProfile +from .types.instance import Cluster +from .types.instance import Instance +from .types.table import Backup +from .types.table import BackupInfo +from .types.table import ColumnFamily +from .types.table import GcRule +from .types.table import RestoreInfo +from .types.table import RestoreSourceType +from .types.table import Snapshot +from .types.table import Table __all__ = ( - "enums", - "types", - "BigtableInstanceAdminClient", + "AppProfile", + "Backup", + "BackupInfo", "BigtableTableAdminClient", + "CheckConsistencyRequest", + "CheckConsistencyResponse", + "Cluster", + "ColumnFamily", + "CreateAppProfileRequest", + "CreateBackupMetadata", + "CreateBackupRequest", + "CreateClusterMetadata", + "CreateClusterRequest", + "CreateInstanceMetadata", + "CreateInstanceRequest", + "CreateTableFromSnapshotMetadata", + "CreateTableFromSnapshotRequest", + "CreateTableRequest", + "DeleteAppProfileRequest", + "DeleteBackupRequest", + "DeleteClusterRequest", + "DeleteInstanceRequest", + "DeleteSnapshotRequest", + "DeleteTableRequest", + "DropRowRangeRequest", + "GcRule", + "GenerateConsistencyTokenRequest", + "GenerateConsistencyTokenResponse", + "GetAppProfileRequest", + "GetBackupRequest", + "GetClusterRequest", + "GetInstanceRequest", + "GetSnapshotRequest", + "GetTableRequest", + "Instance", + "ListAppProfilesRequest", + "ListAppProfilesResponse", + "ListBackupsRequest", + "ListBackupsResponse", + "ListClustersRequest", + "ListClustersResponse", + "ListInstancesRequest", + "ListInstancesResponse", + "ListSnapshotsRequest", + "ListSnapshotsResponse", + "ListTablesRequest", + "ListTablesResponse", + "ModifyColumnFamiliesRequest", + "OperationProgress", + "OptimizeRestoredTableMetadata", + "PartialUpdateInstanceRequest", + "RestoreInfo", + "RestoreSourceType", + "RestoreTableMetadata", + 
"RestoreTableRequest", + "Snapshot", + "SnapshotTableMetadata", + "SnapshotTableRequest", + "StorageType", + "Table", + "UpdateAppProfileMetadata", + "UpdateAppProfileRequest", + "UpdateBackupRequest", + "UpdateClusterMetadata", + "UpdateInstanceMetadata", + "BigtableInstanceAdminClient", ) diff --git a/google/cloud/bigtable_admin_v2/gapic/__init__.py b/google/cloud/bigtable_admin_v2/gapic/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py b/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py deleted file mode 100644 index 202f2cacf..000000000 --- a/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py +++ /dev/null @@ -1,1894 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Accesses the google.bigtable.admin.v2 BigtableInstanceAdmin API.""" - -import functools -import pkg_resources -import warnings - -from google.oauth2 import service_account -import google.api_core.client_options -import google.api_core.gapic_v1.client_info -import google.api_core.gapic_v1.config -import google.api_core.gapic_v1.method -import google.api_core.gapic_v1.routing_header -import google.api_core.grpc_helpers -import google.api_core.operation -import google.api_core.operations_v1 -import google.api_core.page_iterator -import google.api_core.path_template -import grpc - -from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client_config -from google.cloud.bigtable_admin_v2.gapic import enums -from google.cloud.bigtable_admin_v2.gapic.transports import ( - bigtable_instance_admin_grpc_transport, -) -from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2 -from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2_grpc -from google.cloud.bigtable_admin_v2.proto import instance_pb2 -from google.iam.v1 import iam_policy_pb2 -from google.iam.v1 import options_pb2 -from google.iam.v1 import policy_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 - - -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( - "google-cloud-bigtable-admin", -).version - - -class BigtableInstanceAdminClient(object): - """ - Service for creating, configuring, and deleting Cloud Bigtable Instances and - Clusters. Provides access to the Instance and Cluster schemas only, not the - tables' metadata or data stored in those tables. - """ - - SERVICE_ADDRESS = "bigtableadmin.googleapis.com:443" - """The default address of the service.""" - - # The name of the interface for this client. This is the key used to - # find the method configuration in the client_config dictionary. 
- _INTERFACE_NAME = "google.bigtable.admin.v2.BigtableInstanceAdmin" - - @classmethod - def from_service_account_file(cls, filename, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - BigtableInstanceAdminClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file(filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @classmethod - def app_profile_path(cls, project, instance, app_profile): - """Return a fully-qualified app_profile string.""" - return google.api_core.path_template.expand( - "projects/{project}/instances/{instance}/appProfiles/{app_profile}", - project=project, - instance=instance, - app_profile=app_profile, - ) - - @classmethod - def cluster_path(cls, project, instance, cluster): - """Return a fully-qualified cluster string.""" - return google.api_core.path_template.expand( - "projects/{project}/instances/{instance}/clusters/{cluster}", - project=project, - instance=instance, - cluster=cluster, - ) - - @classmethod - def instance_path(cls, project, instance): - """Return a fully-qualified instance string.""" - return google.api_core.path_template.expand( - "projects/{project}/instances/{instance}", - project=project, - instance=instance, - ) - - @classmethod - def location_path(cls, project, location): - """Return a fully-qualified location string.""" - return google.api_core.path_template.expand( - "projects/{project}/locations/{location}", - project=project, - location=location, - ) - - @classmethod - def project_path(cls, project): - """Return a fully-qualified project string.""" - return google.api_core.path_template.expand( - "projects/{project}", 
project=project, - ) - - def __init__( - self, - transport=None, - channel=None, - credentials=None, - client_config=None, - client_info=None, - client_options=None, - ): - """Constructor. - - Args: - transport (Union[~.BigtableInstanceAdminGrpcTransport, - Callable[[~.Credentials, type], ~.BigtableInstanceAdminGrpcTransport]): A transport - instance, responsible for actually making the API calls. - The default transport uses the gRPC protocol. - This argument may also be a callable which returns a - transport instance. Callables will be sent the credentials - as the first argument and the default transport class as - the second argument. - channel (grpc.Channel): DEPRECATED. A ``Channel`` instance - through which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is mutually exclusive with providing a - transport instance to ``transport``; doing so will raise - an exception. - client_config (dict): DEPRECATED. A dictionary of call options for - each method. If not specified, the default configuration is used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - client_options (Union[dict, google.api_core.client_options.ClientOptions]): - Client options used to set user options on the client. API Endpoint - should be set through client_options. - """ - # Raise deprecation warnings for things we want to go away. 
- if client_config is not None: - warnings.warn( - "The `client_config` argument is deprecated.", - PendingDeprecationWarning, - stacklevel=2, - ) - else: - client_config = bigtable_instance_admin_client_config.config - - if channel: - warnings.warn( - "The `channel` argument is deprecated; use " "`transport` instead.", - PendingDeprecationWarning, - stacklevel=2, - ) - - api_endpoint = self.SERVICE_ADDRESS - if client_options: - if type(client_options) == dict: - client_options = google.api_core.client_options.from_dict( - client_options - ) - if client_options.api_endpoint: - api_endpoint = client_options.api_endpoint - - # Instantiate the transport. - # The transport is responsible for handling serialization and - # deserialization and actually sending data to the service. - if transport: - if callable(transport): - self.transport = transport( - credentials=credentials, - default_class=bigtable_instance_admin_grpc_transport.BigtableInstanceAdminGrpcTransport, - address=api_endpoint, - ) - else: - if credentials: - raise ValueError( - "Received both a transport instance and " - "credentials; these are mutually exclusive." - ) - self.transport = transport - else: - self.transport = bigtable_instance_admin_grpc_transport.BigtableInstanceAdminGrpcTransport( - address=api_endpoint, channel=channel, credentials=credentials, - ) - - if client_info is None: - client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION, - ) - else: - client_info.gapic_version = _GAPIC_LIBRARY_VERSION - self._client_info = client_info - - # Parse out the default settings for retry and timeout for each RPC - # from the client configuration. - # (Ordinarily, these are the defaults specified in the `*_config.py` - # file next to this one.) - self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME], - ) - - # Save a dictionary of cached API call functions. 
- # These are the actual callables which invoke the proper - # transport methods, wrapped with `wrap_method` to add retry, - # timeout, and the like. - self._inner_api_calls = {} - - # Service calls - def create_instance( - self, - parent, - instance_id, - instance, - clusters, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Create an instance within a project. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> parent = client.project_path('[PROJECT]') - >>> - >>> # TODO: Initialize `instance_id`: - >>> instance_id = '' - >>> - >>> # TODO: Initialize `instance`: - >>> instance = {} - >>> - >>> # TODO: Initialize `clusters`: - >>> clusters = {} - >>> - >>> response = client.create_instance(parent, instance_id, instance, clusters) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - parent (str): Required. The unique name of the project in which to create the new - instance. Values are of the form ``projects/{project}``. - instance_id (str): Required. The ID to be used when referring to the new instance - within its project, e.g., just ``myinstance`` rather than - ``projects/myproject/instances/myinstance``. - instance (Union[dict, ~google.cloud.bigtable_admin_v2.types.Instance]): Required. The instance to create. Fields marked ``OutputOnly`` must - be left blank. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Instance` - clusters (dict[str -> Union[dict, ~google.cloud.bigtable_admin_v2.types.Cluster]]): Required. 
The clusters to be created within the instance, mapped by - desired cluster ID, e.g., just ``mycluster`` rather than - ``projects/myproject/instances/myinstance/clusters/mycluster``. Fields - marked ``OutputOnly`` must be left blank. Currently, at most four - clusters can be specified. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Cluster` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "create_instance" not in self._inner_api_calls: - self._inner_api_calls[ - "create_instance" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_instance, - default_retry=self._method_configs["CreateInstance"].retry, - default_timeout=self._method_configs["CreateInstance"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.CreateInstanceRequest( - parent=parent, - instance_id=instance_id, - instance=instance, - clusters=clusters, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["create_instance"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - instance_pb2.Instance, - metadata_type=bigtable_instance_admin_pb2.CreateInstanceMetadata, - ) - - def partial_update_instance( - self, - instance, - update_mask, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Partially updates an instance within a project. This method can modify all - fields of an Instance and is the preferred way to update an Instance. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> # TODO: Initialize `instance`: - >>> instance = {} - >>> - >>> # TODO: Initialize `update_mask`: - >>> update_mask = {} - >>> - >>> response = client.partial_update_instance(instance, update_mask) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. 
- >>> metadata = response.metadata() - - Args: - instance (Union[dict, ~google.cloud.bigtable_admin_v2.types.Instance]): Required. The Instance which will (partially) replace the current value. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Instance` - update_mask (Union[dict, ~google.cloud.bigtable_admin_v2.types.FieldMask]): Required. The subset of Instance fields which should be replaced. - Must be explicitly set. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.FieldMask` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "partial_update_instance" not in self._inner_api_calls: - self._inner_api_calls[ - "partial_update_instance" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.partial_update_instance, - default_retry=self._method_configs["PartialUpdateInstance"].retry, - default_timeout=self._method_configs["PartialUpdateInstance"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.PartialUpdateInstanceRequest( - instance=instance, update_mask=update_mask, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("instance.name", instance.name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["partial_update_instance"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - instance_pb2.Instance, - metadata_type=bigtable_instance_admin_pb2.UpdateInstanceMetadata, - ) - - def create_cluster( - self, - parent, - cluster_id, - cluster, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates a cluster within an instance. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> # TODO: Initialize `cluster_id`: - >>> cluster_id = '' - >>> - >>> # TODO: Initialize `cluster`: - >>> cluster = {} - >>> - >>> response = client.create_cluster(parent, cluster_id, cluster) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. 
- >>> metadata = response.metadata() - - Args: - parent (str): Required. The unique name of the instance in which to create the new - cluster. Values are of the form - ``projects/{project}/instances/{instance}``. - cluster_id (str): Required. The ID to be used when referring to the new cluster within - its instance, e.g., just ``mycluster`` rather than - ``projects/myproject/instances/myinstance/clusters/mycluster``. - cluster (Union[dict, ~google.cloud.bigtable_admin_v2.types.Cluster]): Required. The cluster to be created. Fields marked ``OutputOnly`` - must be left blank. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Cluster` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "create_cluster" not in self._inner_api_calls: - self._inner_api_calls[ - "create_cluster" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_cluster, - default_retry=self._method_configs["CreateCluster"].retry, - default_timeout=self._method_configs["CreateCluster"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.CreateClusterRequest( - parent=parent, cluster_id=cluster_id, cluster=cluster, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["create_cluster"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - instance_pb2.Cluster, - metadata_type=bigtable_instance_admin_pb2.CreateClusterMetadata, - ) - - def update_cluster( - self, - serve_nodes, - name=None, - location=None, - state=None, - default_storage_type=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates a cluster within an instance. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> # TODO: Initialize `serve_nodes`: - >>> serve_nodes = 0 - >>> - >>> response = client.update_cluster(serve_nodes) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - serve_nodes (int): Required. The number of nodes allocated to this cluster. More nodes enable - higher throughput and more consistent performance. 
- name (str): The unique name of the cluster. Values are of the form - ``projects/{project}/instances/{instance}/clusters/[a-z][-a-z0-9]*``. - location (str): (``CreationOnly``) The location where this cluster's nodes and - storage reside. For best performance, clients should be located as close - as possible to this cluster. Currently only zones are supported, so - values should be of the form ``projects/{project}/locations/{zone}``. - state (~google.cloud.bigtable_admin_v2.types.State): The current state of the cluster. - default_storage_type (~google.cloud.bigtable_admin_v2.types.StorageType): (``CreationOnly``) The type of storage used by this cluster to serve - its parent instance's tables, unless explicitly overridden. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "update_cluster" not in self._inner_api_calls: - self._inner_api_calls[ - "update_cluster" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_cluster, - default_retry=self._method_configs["UpdateCluster"].retry, - default_timeout=self._method_configs["UpdateCluster"].timeout, - client_info=self._client_info, - ) - - request = instance_pb2.Cluster( - serve_nodes=serve_nodes, - name=name, - location=location, - state=state, - default_storage_type=default_storage_type, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["update_cluster"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - instance_pb2.Cluster, - metadata_type=bigtable_instance_admin_pb2.UpdateClusterMetadata, - ) - - def update_app_profile( - self, - app_profile, - update_mask, - ignore_warnings=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates an app profile within an instance. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> # TODO: Initialize `app_profile`: - >>> app_profile = {} - >>> - >>> # TODO: Initialize `update_mask`: - >>> update_mask = {} - >>> - >>> response = client.update_app_profile(app_profile, update_mask) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. 
- >>> metadata = response.metadata() - - Args: - app_profile (Union[dict, ~google.cloud.bigtable_admin_v2.types.AppProfile]): Required. The app profile which will (partially) replace the current value. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.AppProfile` - update_mask (Union[dict, ~google.cloud.bigtable_admin_v2.types.FieldMask]): Required. The subset of app profile fields which should be replaced. - If unset, all fields will be replaced. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.FieldMask` - ignore_warnings (bool): If true, ignore safety checks when updating the app profile. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "update_app_profile" not in self._inner_api_calls: - self._inner_api_calls[ - "update_app_profile" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_app_profile, - default_retry=self._method_configs["UpdateAppProfile"].retry, - default_timeout=self._method_configs["UpdateAppProfile"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.UpdateAppProfileRequest( - app_profile=app_profile, - update_mask=update_mask, - ignore_warnings=ignore_warnings, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("app_profile.name", app_profile.name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["update_app_profile"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - instance_pb2.AppProfile, - metadata_type=bigtable_instance_admin_pb2.UpdateAppProfileMetadata, - ) - - def get_instance( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets information about an instance. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> name = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> response = client.get_instance(name) - - Args: - name (str): Required. The unique name of the requested instance. Values are of - the form ``projects/{project}/instances/{instance}``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. 
- timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Instance` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "get_instance" not in self._inner_api_calls: - self._inner_api_calls[ - "get_instance" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_instance, - default_retry=self._method_configs["GetInstance"].retry, - default_timeout=self._method_configs["GetInstance"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.GetInstanceRequest(name=name,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_instance"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_instances( - self, - parent, - page_token=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists information about instances in a project. 
- - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> parent = client.project_path('[PROJECT]') - >>> - >>> response = client.list_instances(parent) - - Args: - parent (str): Required. The unique name of the project for which a list of - instances is requested. Values are of the form ``projects/{project}``. - page_token (str): DEPRECATED: This field is unused and ignored. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.ListInstancesResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "list_instances" not in self._inner_api_calls: - self._inner_api_calls[ - "list_instances" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_instances, - default_retry=self._method_configs["ListInstances"].retry, - default_timeout=self._method_configs["ListInstances"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.ListInstancesRequest( - parent=parent, page_token=page_token, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["list_instances"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def update_instance( - self, - display_name, - name=None, - state=None, - type_=None, - labels=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates an instance within a project. This method updates only the display - name and type for an Instance. To update other Instance properties, such as - labels, use PartialUpdateInstance. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> # TODO: Initialize `display_name`: - >>> display_name = '' - >>> - >>> response = client.update_instance(display_name) - - Args: - display_name (str): Required. The descriptive name for this instance as it appears in UIs. - Can be changed at any time, but should be kept globally unique - to avoid confusion. - name (str): The unique name of the instance. Values are of the form - ``projects/{project}/instances/[a-z][a-z0-9\\-]+[a-z0-9]``. - state (~google.cloud.bigtable_admin_v2.types.State): (``OutputOnly``) The current state of the instance. 
- type_ (~google.cloud.bigtable_admin_v2.types.Type): The type of the instance. Defaults to ``PRODUCTION``. - labels (dict[str -> str]): Labels are a flexible and lightweight mechanism for organizing cloud - resources into groups that reflect a customer's organizational needs and - deployment strategies. They can be used to filter resources and - aggregate metrics. - - - Label keys must be between 1 and 63 characters long and must conform - to the regular expression: - ``[\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}``. - - Label values must be between 0 and 63 characters long and must - conform to the regular expression: ``[\p{Ll}\p{Lo}\p{N}_-]{0,63}``. - - No more than 64 labels can be associated with a given resource. - - Keys and values must both be under 128 bytes. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Instance` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "update_instance" not in self._inner_api_calls: - self._inner_api_calls[ - "update_instance" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_instance, - default_retry=self._method_configs["UpdateInstance"].retry, - default_timeout=self._method_configs["UpdateInstance"].timeout, - client_info=self._client_info, - ) - - request = instance_pb2.Instance( - display_name=display_name, - name=name, - state=state, - type=type_, - labels=labels, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["update_instance"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def delete_instance( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Delete an instance from a project. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> name = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> client.delete_instance(name) - - Args: - name (str): Required. The unique name of the instance to be deleted. Values are - of the form ``projects/{project}/instances/{instance}``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. 
- - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "delete_instance" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_instance" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_instance, - default_retry=self._method_configs["DeleteInstance"].retry, - default_timeout=self._method_configs["DeleteInstance"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.DeleteInstanceRequest(name=name,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["delete_instance"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def get_cluster( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets information about a cluster. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> name = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') - >>> - >>> response = client.get_cluster(name) - - Args: - name (str): Required. The unique name of the requested cluster. Values are of - the form ``projects/{project}/instances/{instance}/clusters/{cluster}``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. 
- timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Cluster` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "get_cluster" not in self._inner_api_calls: - self._inner_api_calls[ - "get_cluster" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_cluster, - default_retry=self._method_configs["GetCluster"].retry, - default_timeout=self._method_configs["GetCluster"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.GetClusterRequest(name=name,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_cluster"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_clusters( - self, - parent, - page_token=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists information about clusters in an instance. 
- - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> response = client.list_clusters(parent) - - Args: - parent (str): Required. The unique name of the instance for which a list of - clusters is requested. Values are of the form - ``projects/{project}/instances/{instance}``. Use ``{instance} = '-'`` to - list Clusters for all Instances in a project, e.g., - ``projects/myproject/instances/-``. - page_token (str): DEPRECATED: This field is unused and ignored. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.ListClustersResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "list_clusters" not in self._inner_api_calls: - self._inner_api_calls[ - "list_clusters" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_clusters, - default_retry=self._method_configs["ListClusters"].retry, - default_timeout=self._method_configs["ListClusters"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.ListClustersRequest( - parent=parent, page_token=page_token, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["list_clusters"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def delete_cluster( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Deletes a cluster from an instance. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> name = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') - >>> - >>> client.delete_cluster(name) - - Args: - name (str): Required. The unique name of the cluster to be deleted. Values are - of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. 
- - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "delete_cluster" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_cluster" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_cluster, - default_retry=self._method_configs["DeleteCluster"].retry, - default_timeout=self._method_configs["DeleteCluster"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.DeleteClusterRequest(name=name,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["delete_cluster"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def create_app_profile( - self, - parent, - app_profile_id, - app_profile, - ignore_warnings=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates an app profile within an instance. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> # TODO: Initialize `app_profile_id`: - >>> app_profile_id = '' - >>> - >>> # TODO: Initialize `app_profile`: - >>> app_profile = {} - >>> - >>> response = client.create_app_profile(parent, app_profile_id, app_profile) - - Args: - parent (str): Required. The unique name of the instance in which to create the new - app profile. 
Values are of the form - ``projects/{project}/instances/{instance}``. - app_profile_id (str): Required. The ID to be used when referring to the new app profile - within its instance, e.g., just ``myprofile`` rather than - ``projects/myproject/instances/myinstance/appProfiles/myprofile``. - app_profile (Union[dict, ~google.cloud.bigtable_admin_v2.types.AppProfile]): Required. The app profile to be created. Fields marked - ``OutputOnly`` will be ignored. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.AppProfile` - ignore_warnings (bool): If true, ignore safety checks when creating the app profile. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.AppProfile` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "create_app_profile" not in self._inner_api_calls: - self._inner_api_calls[ - "create_app_profile" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_app_profile, - default_retry=self._method_configs["CreateAppProfile"].retry, - default_timeout=self._method_configs["CreateAppProfile"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.CreateAppProfileRequest( - parent=parent, - app_profile_id=app_profile_id, - app_profile=app_profile, - ignore_warnings=ignore_warnings, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["create_app_profile"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def get_app_profile( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets information about an app profile. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> name = client.app_profile_path('[PROJECT]', '[INSTANCE]', '[APP_PROFILE]') - >>> - >>> response = client.get_app_profile(name) - - Args: - name (str): Required. The unique name of the requested app profile. Values are - of the form - ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. 
- metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.AppProfile` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "get_app_profile" not in self._inner_api_calls: - self._inner_api_calls[ - "get_app_profile" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_app_profile, - default_retry=self._method_configs["GetAppProfile"].retry, - default_timeout=self._method_configs["GetAppProfile"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.GetAppProfileRequest(name=name,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_app_profile"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_app_profiles( - self, - parent, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists information about app profiles in an instance. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> # Iterate over all results - >>> for element in client.list_app_profiles(parent): - ... # process element - ... 
pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_app_profiles(parent).pages: - ... for element in page: - ... # process element - ... pass - - Args: - parent (str): Required. The unique name of the instance for which a list of app - profiles is requested. Values are of the form - ``projects/{project}/instances/{instance}``. Use ``{instance} = '-'`` to - list AppProfiles for all Instances in a project, e.g., - ``projects/myproject/instances/-``. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.bigtable_admin_v2.types.AppProfile` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "list_app_profiles" not in self._inner_api_calls: - self._inner_api_calls[ - "list_app_profiles" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_app_profiles, - default_retry=self._method_configs["ListAppProfiles"].retry, - default_timeout=self._method_configs["ListAppProfiles"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.ListAppProfilesRequest( - parent=parent, page_size=page_size, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_app_profiles"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="app_profiles", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def delete_app_profile( - self, - name, - ignore_warnings, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Deletes an app profile from an instance. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> name = client.app_profile_path('[PROJECT]', '[INSTANCE]', '[APP_PROFILE]') - >>> - >>> # TODO: Initialize `ignore_warnings`: - >>> ignore_warnings = False - >>> - >>> client.delete_app_profile(name, ignore_warnings) - - Args: - name (str): Required. The unique name of the app profile to be deleted. Values - are of the form - ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``. - ignore_warnings (bool): Required. If true, ignore safety checks when deleting the app profile. 
- retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "delete_app_profile" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_app_profile" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_app_profile, - default_retry=self._method_configs["DeleteAppProfile"].retry, - default_timeout=self._method_configs["DeleteAppProfile"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.DeleteAppProfileRequest( - name=name, ignore_warnings=ignore_warnings, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["delete_app_profile"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def get_iam_policy( - self, - resource, - options_=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets the access control policy for an instance resource. Returns an empty - policy if an instance exists but does not have a policy set. 
- - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> # TODO: Initialize `resource`: - >>> resource = '' - >>> - >>> response = client.get_iam_policy(resource) - - Args: - resource (str): REQUIRED: The resource for which the policy is being requested. - See the operation documentation for the appropriate value for this field. - options_ (Union[dict, ~google.cloud.bigtable_admin_v2.types.GetPolicyOptions]): OPTIONAL: A ``GetPolicyOptions`` object for specifying options to - ``GetIamPolicy``. This field is only used by Cloud IAM. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.GetPolicyOptions` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Policy` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "get_iam_policy" not in self._inner_api_calls: - self._inner_api_calls[ - "get_iam_policy" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_iam_policy, - default_retry=self._method_configs["GetIamPolicy"].retry, - default_timeout=self._method_configs["GetIamPolicy"].timeout, - client_info=self._client_info, - ) - - request = iam_policy_pb2.GetIamPolicyRequest( - resource=resource, options=options_, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("resource", resource)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_iam_policy"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def set_iam_policy( - self, - resource, - policy, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Sets the access control policy on an instance resource. Replaces any - existing policy. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> # TODO: Initialize `resource`: - >>> resource = '' - >>> - >>> # TODO: Initialize `policy`: - >>> policy = {} - >>> - >>> response = client.set_iam_policy(resource, policy) - - Args: - resource (str): REQUIRED: The resource for which the policy is being specified. - See the operation documentation for the appropriate value for this field. - policy (Union[dict, ~google.cloud.bigtable_admin_v2.types.Policy]): REQUIRED: The complete policy to be applied to the ``resource``. The - size of the policy is limited to a few 10s of KB. An empty policy is a - valid policy but certain Cloud Platform services (such as Projects) - might reject them. 
- - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Policy` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Policy` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "set_iam_policy" not in self._inner_api_calls: - self._inner_api_calls[ - "set_iam_policy" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.set_iam_policy, - default_retry=self._method_configs["SetIamPolicy"].retry, - default_timeout=self._method_configs["SetIamPolicy"].timeout, - client_info=self._client_info, - ) - - request = iam_policy_pb2.SetIamPolicyRequest(resource=resource, policy=policy,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("resource", resource)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["set_iam_policy"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def test_iam_permissions( - self, - resource, - permissions, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Returns permissions that the caller has on the specified instance resource. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> # TODO: Initialize `resource`: - >>> resource = '' - >>> - >>> # TODO: Initialize `permissions`: - >>> permissions = [] - >>> - >>> response = client.test_iam_permissions(resource, permissions) - - Args: - resource (str): REQUIRED: The resource for which the policy detail is being requested. - See the operation documentation for the appropriate value for this field. - permissions (list[str]): The set of permissions to check for the ``resource``. Permissions - with wildcards (such as '*' or 'storage.*') are not allowed. For more - information see `IAM - Overview `__. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. 
- timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.TestIamPermissionsResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "test_iam_permissions" not in self._inner_api_calls: - self._inner_api_calls[ - "test_iam_permissions" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.test_iam_permissions, - default_retry=self._method_configs["TestIamPermissions"].retry, - default_timeout=self._method_configs["TestIamPermissions"].timeout, - client_info=self._client_info, - ) - - request = iam_policy_pb2.TestIamPermissionsRequest( - resource=resource, permissions=permissions, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("resource", resource)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["test_iam_permissions"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) diff --git a/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py b/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py deleted file mode 100644 index 4301a8226..000000000 --- a/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py +++ /dev/null @@ -1,118 +0,0 @@ -config = { - "interfaces": { - 
"google.bigtable.admin.v2.BigtableInstanceAdmin": { - "retry_codes": { - "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], - "non_idempotent": [], - }, - "retry_params": { - "default": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 20000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 20000, - "total_timeout_millis": 600000, - } - }, - "methods": { - "CreateInstance": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "PartialUpdateInstance": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "CreateCluster": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "UpdateCluster": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "UpdateAppProfile": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "GetInstance": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "ListInstances": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "UpdateInstance": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "DeleteInstance": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "GetCluster": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "ListClusters": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "DeleteCluster": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "CreateAppProfile": { 
- "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "GetAppProfile": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "ListAppProfiles": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "DeleteAppProfile": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "GetIamPolicy": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "SetIamPolicy": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "TestIamPermissions": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - }, - } - } -} diff --git a/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py b/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py deleted file mode 100644 index 11019b9c1..000000000 --- a/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py +++ /dev/null @@ -1,2297 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Accesses the google.bigtable.admin.v2 BigtableTableAdmin API.""" - -import functools -import pkg_resources -import warnings - -from google.oauth2 import service_account -import google.api_core.client_options -import google.api_core.gapic_v1.client_info -import google.api_core.gapic_v1.config -import google.api_core.gapic_v1.method -import google.api_core.gapic_v1.routing_header -import google.api_core.grpc_helpers -import google.api_core.operation -import google.api_core.operations_v1 -import google.api_core.page_iterator -import google.api_core.path_template -import google.api_core.protobuf_helpers -import grpc - -from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client_config -from google.cloud.bigtable_admin_v2.gapic import enums -from google.cloud.bigtable_admin_v2.gapic.transports import ( - bigtable_table_admin_grpc_transport, -) -from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2 -from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2_grpc -from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2 -from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2_grpc -from google.cloud.bigtable_admin_v2.proto import instance_pb2 -from google.cloud.bigtable_admin_v2.proto import table_pb2 -from google.iam.v1 import iam_policy_pb2 -from google.iam.v1 import options_pb2 -from google.iam.v1 import policy_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import duration_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 - - -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( - "google-cloud-bigtable-admin", -).version - - -class BigtableTableAdminClient(object): - """ - Service for creating, configuring, and deleting Cloud Bigtable tables. - - - Provides access to the table schemas only, not the data stored within - the tables. 
- """ - - SERVICE_ADDRESS = "bigtableadmin.googleapis.com:443" - """The default address of the service.""" - - # The name of the interface for this client. This is the key used to - # find the method configuration in the client_config dictionary. - _INTERFACE_NAME = "google.bigtable.admin.v2.BigtableTableAdmin" - - @classmethod - def from_service_account_file(cls, filename, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - BigtableTableAdminClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file(filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @classmethod - def backup_path(cls, project, instance, cluster, backup): - """Return a fully-qualified backup string.""" - return google.api_core.path_template.expand( - "projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}", - project=project, - instance=instance, - cluster=cluster, - backup=backup, - ) - - @classmethod - def cluster_path(cls, project, instance, cluster): - """Return a fully-qualified cluster string.""" - return google.api_core.path_template.expand( - "projects/{project}/instances/{instance}/clusters/{cluster}", - project=project, - instance=instance, - cluster=cluster, - ) - - @classmethod - def instance_path(cls, project, instance): - """Return a fully-qualified instance string.""" - return google.api_core.path_template.expand( - "projects/{project}/instances/{instance}", - project=project, - instance=instance, - ) - - @classmethod - def snapshot_path(cls, project, instance, cluster, snapshot): - """Return a fully-qualified snapshot string.""" - return 
google.api_core.path_template.expand( - "projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}", - project=project, - instance=instance, - cluster=cluster, - snapshot=snapshot, - ) - - @classmethod - def table_path(cls, project, instance, table): - """Return a fully-qualified table string.""" - return google.api_core.path_template.expand( - "projects/{project}/instances/{instance}/tables/{table}", - project=project, - instance=instance, - table=table, - ) - - def __init__( - self, - transport=None, - channel=None, - credentials=None, - client_config=None, - client_info=None, - client_options=None, - ): - """Constructor. - - Args: - transport (Union[~.BigtableTableAdminGrpcTransport, - Callable[[~.Credentials, type], ~.BigtableTableAdminGrpcTransport]): A transport - instance, responsible for actually making the API calls. - The default transport uses the gRPC protocol. - This argument may also be a callable which returns a - transport instance. Callables will be sent the credentials - as the first argument and the default transport class as - the second argument. - channel (grpc.Channel): DEPRECATED. A ``Channel`` instance - through which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is mutually exclusive with providing a - transport instance to ``transport``; doing so will raise - an exception. - client_config (dict): DEPRECATED. A dictionary of call options for - each method. If not specified, the default configuration is used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. 
If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - client_options (Union[dict, google.api_core.client_options.ClientOptions]): - Client options used to set user options on the client. API Endpoint - should be set through client_options. - """ - # Raise deprecation warnings for things we want to go away. - if client_config is not None: - warnings.warn( - "The `client_config` argument is deprecated.", - PendingDeprecationWarning, - stacklevel=2, - ) - else: - client_config = bigtable_table_admin_client_config.config - - if channel: - warnings.warn( - "The `channel` argument is deprecated; use " "`transport` instead.", - PendingDeprecationWarning, - stacklevel=2, - ) - - api_endpoint = self.SERVICE_ADDRESS - if client_options: - if type(client_options) == dict: - client_options = google.api_core.client_options.from_dict( - client_options - ) - if client_options.api_endpoint: - api_endpoint = client_options.api_endpoint - - # Instantiate the transport. - # The transport is responsible for handling serialization and - # deserialization and actually sending data to the service. - if transport: - if callable(transport): - self.transport = transport( - credentials=credentials, - default_class=bigtable_table_admin_grpc_transport.BigtableTableAdminGrpcTransport, - address=api_endpoint, - ) - else: - if credentials: - raise ValueError( - "Received both a transport instance and " - "credentials; these are mutually exclusive." 
- ) - self.transport = transport - else: - self.transport = bigtable_table_admin_grpc_transport.BigtableTableAdminGrpcTransport( - address=api_endpoint, channel=channel, credentials=credentials, - ) - - if client_info is None: - client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION, - ) - else: - client_info.gapic_version = _GAPIC_LIBRARY_VERSION - self._client_info = client_info - - # Parse out the default settings for retry and timeout for each RPC - # from the client configuration. - # (Ordinarily, these are the defaults specified in the `*_config.py` - # file next to this one.) - self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME], - ) - - # Save a dictionary of cached API call functions. - # These are the actual callables which invoke the proper - # transport methods, wrapped with `wrap_method` to add retry, - # timeout, and the like. - self._inner_api_calls = {} - - # Service calls - def create_table_from_snapshot( - self, - parent, - table_id, - source_snapshot, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates a new table from the specified snapshot. The target table must - not exist. The snapshot and the table must be in the same instance. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. 
- - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> # TODO: Initialize `table_id`: - >>> table_id = '' - >>> source_snapshot = client.snapshot_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]', '[SNAPSHOT]') - >>> - >>> response = client.create_table_from_snapshot(parent, table_id, source_snapshot) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - parent (str): Required. The unique name of the instance in which to create the - table. Values are of the form - ``projects/{project}/instances/{instance}``. - table_id (str): Required. The name by which the new table should be referred to - within the parent instance, e.g., ``foobar`` rather than - ``{parent}/tables/foobar``. - source_snapshot (str): Required. The unique name of the snapshot from which to restore the - table. The snapshot and the table must be in the same instance. Values - are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. 
- google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "create_table_from_snapshot" not in self._inner_api_calls: - self._inner_api_calls[ - "create_table_from_snapshot" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_table_from_snapshot, - default_retry=self._method_configs["CreateTableFromSnapshot"].retry, - default_timeout=self._method_configs["CreateTableFromSnapshot"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.CreateTableFromSnapshotRequest( - parent=parent, table_id=table_id, source_snapshot=source_snapshot, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["create_table_from_snapshot"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - table_pb2.Table, - metadata_type=bigtable_table_admin_pb2.CreateTableFromSnapshotMetadata, - ) - - def snapshot_table( - self, - name, - cluster, - snapshot_id, - ttl=None, - description=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates a new snapshot in the specified cluster from the specified - source table. The cluster and the table must be in the same instance. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. 
This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> cluster = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') - >>> - >>> # TODO: Initialize `snapshot_id`: - >>> snapshot_id = '' - >>> - >>> response = client.snapshot_table(name, cluster, snapshot_id) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - name (str): Required. The unique name of the table to have the snapshot taken. - Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - cluster (str): Required. The name of the cluster where the snapshot will be created - in. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - snapshot_id (str): Required. The ID by which the new snapshot should be referred to - within the parent cluster, e.g., ``mysnapshot`` of the form: - ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` rather than - ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/mysnapshot``. - ttl (Union[dict, ~google.cloud.bigtable_admin_v2.types.Duration]): The amount of time that the new snapshot can stay active after it is - created. Once 'ttl' expires, the snapshot will get deleted. The maximum - amount of time a snapshot can stay active is 7 days. If 'ttl' is not - specified, the default value of 24 hours will be used. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Duration` - description (str): Description of the snapshot. 
- retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "snapshot_table" not in self._inner_api_calls: - self._inner_api_calls[ - "snapshot_table" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.snapshot_table, - default_retry=self._method_configs["SnapshotTable"].retry, - default_timeout=self._method_configs["SnapshotTable"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.SnapshotTableRequest( - name=name, - cluster=cluster, - snapshot_id=snapshot_id, - ttl=ttl, - description=description, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["snapshot_table"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - table_pb2.Snapshot, - metadata_type=bigtable_table_admin_pb2.SnapshotTableMetadata, - ) - - def 
create_backup( - self, - parent, - backup_id, - backup, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Starts creating a new Cloud Bigtable Backup. The returned backup - ``long-running operation`` can be used to track creation of the backup. - The ``metadata`` field type is ``CreateBackupMetadata``. The - ``response`` field type is ``Backup``, if successful. Cancelling the - returned operation will stop the creation and delete the backup. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> parent = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') - >>> - >>> # TODO: Initialize `backup_id`: - >>> backup_id = '' - >>> - >>> # TODO: Initialize `backup`: - >>> backup = {} - >>> - >>> response = client.create_backup(parent, backup_id, backup) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - parent (str): Required. This must be one of the clusters in the instance in which - this table is located. The backup will be stored in this cluster. Values - are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - backup_id (str): Required. The id of the backup to be created. The ``backup_id`` - along with the parent ``parent`` are combined as - {parent}/backups/{backup_id} to create the full backup name, of the - form: - ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``. - This string must be between 1 and 50 characters in length and match the - regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*. - backup (Union[dict, ~google.cloud.bigtable_admin_v2.types.Backup]): Required. The backup to create. 
- - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Backup` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "create_backup" not in self._inner_api_calls: - self._inner_api_calls[ - "create_backup" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_backup, - default_retry=self._method_configs["CreateBackup"].retry, - default_timeout=self._method_configs["CreateBackup"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.CreateBackupRequest( - parent=parent, backup_id=backup_id, backup=backup, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["create_backup"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - table_pb2.Backup, - metadata_type=bigtable_table_admin_pb2.CreateBackupMetadata, - ) - - def restore_table( - self, - parent, - table_id, - backup=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Create a new table by restoring from a completed backup. The new - table must be in the same instance as the instance containing the - backup. The returned table ``long-running operation`` can be used to - track the progress of the operation, and to cancel it. The ``metadata`` - field type is ``RestoreTableMetadata``. The ``response`` type is - ``Table``, if successful. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> # TODO: Initialize `table_id`: - >>> table_id = '' - >>> - >>> response = client.restore_table(parent, table_id) - >>> - >>> def callback(operation_future): - ... 
# Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - parent (str): Required. The name of the instance in which to create the restored - table. This instance must be the parent of the source backup. Values are - of the form ``projects//instances/``. - table_id (str): Required. The id of the table to create and restore to. This table - must not already exist. The ``table_id`` appended to ``parent`` forms - the full table name of the form - ``projects//instances//tables/``. - backup (str): Name of the backup from which to restore. Values are of the form - ``projects//instances//clusters//backups/``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "restore_table" not in self._inner_api_calls: - self._inner_api_calls[ - "restore_table" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.restore_table, - default_retry=self._method_configs["RestoreTable"].retry, - default_timeout=self._method_configs["RestoreTable"].timeout, - client_info=self._client_info, - ) - - # Sanity check: We have some fields which are mutually exclusive; - # raise ValueError if more than one is sent. - google.api_core.protobuf_helpers.check_oneof(backup=backup,) - - request = bigtable_table_admin_pb2.RestoreTableRequest( - parent=parent, table_id=table_id, backup=backup, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["restore_table"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - table_pb2.Table, - metadata_type=bigtable_table_admin_pb2.RestoreTableMetadata, - ) - - def create_table( - self, - parent, - table_id, - table, - initial_splits=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates a new table in the specified instance. - The table can be created with a full set of initial column families, - specified in the request. 
- - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> # TODO: Initialize `table_id`: - >>> table_id = '' - >>> - >>> # TODO: Initialize `table`: - >>> table = {} - >>> - >>> response = client.create_table(parent, table_id, table) - - Args: - parent (str): Required. The unique name of the instance in which to create the - table. Values are of the form - ``projects/{project}/instances/{instance}``. - table_id (str): Required. The name by which the new table should be referred to - within the parent instance, e.g., ``foobar`` rather than - ``{parent}/tables/foobar``. Maximum 50 characters. - table (Union[dict, ~google.cloud.bigtable_admin_v2.types.Table]): Required. The Table to create. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Table` - initial_splits (list[Union[dict, ~google.cloud.bigtable_admin_v2.types.Split]]): The optional list of row keys that will be used to initially split - the table into several tablets (tablets are similar to HBase regions). - Given two split keys, ``s1`` and ``s2``, three tablets will be created, - spanning the key ranges: ``[, s1), [s1, s2), [s2, )``. 
- - Example: - - - Row keys := ``["a", "apple", "custom", "customer_1", "customer_2",`` - ``"other", "zz"]`` - - initial_split_keys := - ``["apple", "customer_1", "customer_2", "other"]`` - - Key assignment: - - - Tablet 1 ``[, apple) => {"a"}.`` - - Tablet 2 ``[apple, customer_1) => {"apple", "custom"}.`` - - Tablet 3 ``[customer_1, customer_2) => {"customer_1"}.`` - - Tablet 4 ``[customer_2, other) => {"customer_2"}.`` - - Tablet 5 ``[other, ) => {"other", "zz"}.`` - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Split` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Table` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "create_table" not in self._inner_api_calls: - self._inner_api_calls[ - "create_table" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_table, - default_retry=self._method_configs["CreateTable"].retry, - default_timeout=self._method_configs["CreateTable"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.CreateTableRequest( - parent=parent, - table_id=table_id, - table=table, - initial_splits=initial_splits, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["create_table"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_tables( - self, - parent, - view=None, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists all tables served from a specified instance. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> # Iterate over all results - >>> for element in client.list_tables(parent): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_tables(parent).pages: - ... for element in page: - ... # process element - ... pass - - Args: - parent (str): Required. The unique name of the instance for which tables should be - listed. Values are of the form - ``projects/{project}/instances/{instance}``. - view (~google.cloud.bigtable_admin_v2.types.View): The view to be applied to the returned tables' fields. 
Only - NAME_ONLY view (default) and REPLICATION_VIEW are supported. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.bigtable_admin_v2.types.Table` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "list_tables" not in self._inner_api_calls: - self._inner_api_calls[ - "list_tables" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_tables, - default_retry=self._method_configs["ListTables"].retry, - default_timeout=self._method_configs["ListTables"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.ListTablesRequest( - parent=parent, view=view, page_size=page_size, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_tables"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="tables", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def get_table( - self, - name, - view=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets metadata information about the specified table. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> response = client.get_table(name) - - Args: - name (str): Required. The unique name of the requested table. Values are of the - form ``projects/{project}/instances/{instance}/tables/{table}``. - view (~google.cloud.bigtable_admin_v2.types.View): The view to be applied to the returned table's fields. Defaults to - ``SCHEMA_VIEW`` if unspecified. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. 
If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Table` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "get_table" not in self._inner_api_calls: - self._inner_api_calls[ - "get_table" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_table, - default_retry=self._method_configs["GetTable"].retry, - default_timeout=self._method_configs["GetTable"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.GetTableRequest(name=name, view=view,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_table"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def delete_table( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Permanently deletes a specified table and all of its data. 
- - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> client.delete_table(name) - - Args: - name (str): Required. The unique name of the table to be deleted. Values are of - the form ``projects/{project}/instances/{instance}/tables/{table}``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "delete_table" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_table" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_table, - default_retry=self._method_configs["DeleteTable"].retry, - default_timeout=self._method_configs["DeleteTable"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.DeleteTableRequest(name=name,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["delete_table"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def modify_column_families( - self, - name, - modifications, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Performs a series of column family modifications on the specified table. - Either all or none of the modifications will occur before this method - returns, but data requests received prior to that point may see a table - where only some modifications have taken effect. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> # TODO: Initialize `modifications`: - >>> modifications = [] - >>> - >>> response = client.modify_column_families(name, modifications) - - Args: - name (str): Required. The unique name of the table whose families should be - modified. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - modifications (list[Union[dict, ~google.cloud.bigtable_admin_v2.types.Modification]]): Required. Modifications to be atomically applied to the specified table's families. 
- Entries are applied in order, meaning that earlier modifications can be - masked by later ones (in the case of repeated updates to the same family, - for example). - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Modification` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Table` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "modify_column_families" not in self._inner_api_calls: - self._inner_api_calls[ - "modify_column_families" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.modify_column_families, - default_retry=self._method_configs["ModifyColumnFamilies"].retry, - default_timeout=self._method_configs["ModifyColumnFamilies"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.ModifyColumnFamiliesRequest( - name=name, modifications=modifications, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["modify_column_families"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def drop_row_range( - self, - name, - row_key_prefix=None, - delete_all_data_from_table=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Permanently drop/delete a row range from a specified table. The request can - specify whether to delete all rows in a table, or only those that match a - particular prefix. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> client.drop_row_range(name) - - Args: - name (str): Required. The unique name of the table on which to drop a range of - rows. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - row_key_prefix (bytes): Delete all rows that start with this row key prefix. Prefix cannot be - zero length. - delete_all_data_from_table (bool): Delete all rows in the table. Setting this to false is a no-op. 
- retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "drop_row_range" not in self._inner_api_calls: - self._inner_api_calls[ - "drop_row_range" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.drop_row_range, - default_retry=self._method_configs["DropRowRange"].retry, - default_timeout=self._method_configs["DropRowRange"].timeout, - client_info=self._client_info, - ) - - # Sanity check: We have some fields which are mutually exclusive; - # raise ValueError if more than one is sent. 
- google.api_core.protobuf_helpers.check_oneof( - row_key_prefix=row_key_prefix, - delete_all_data_from_table=delete_all_data_from_table, - ) - - request = bigtable_table_admin_pb2.DropRowRangeRequest( - name=name, - row_key_prefix=row_key_prefix, - delete_all_data_from_table=delete_all_data_from_table, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["drop_row_range"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def generate_consistency_token( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Generates a consistency token for a Table, which can be used in - CheckConsistency to check whether mutations to the table that finished - before this call started have been replicated. The tokens will be available - for 90 days. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> response = client.generate_consistency_token(name) - - Args: - name (str): Required. The unique name of the Table for which to create a - consistency token. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. 
- metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "generate_consistency_token" not in self._inner_api_calls: - self._inner_api_calls[ - "generate_consistency_token" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.generate_consistency_token, - default_retry=self._method_configs["GenerateConsistencyToken"].retry, - default_timeout=self._method_configs[ - "GenerateConsistencyToken" - ].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.GenerateConsistencyTokenRequest(name=name,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["generate_consistency_token"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def check_consistency( - self, - name, - consistency_token, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Checks replication consistency based on a consistency token, that is, if - replication has caught up based on the conditions specified in the token - and the check request. 
- - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> # TODO: Initialize `consistency_token`: - >>> consistency_token = '' - >>> - >>> response = client.check_consistency(name, consistency_token) - - Args: - name (str): Required. The unique name of the Table for which to check - replication consistency. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - consistency_token (str): Required. The token created using GenerateConsistencyToken for the Table. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.CheckConsistencyResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "check_consistency" not in self._inner_api_calls: - self._inner_api_calls[ - "check_consistency" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.check_consistency, - default_retry=self._method_configs["CheckConsistency"].retry, - default_timeout=self._method_configs["CheckConsistency"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.CheckConsistencyRequest( - name=name, consistency_token=consistency_token, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["check_consistency"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def get_snapshot( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets metadata information about the specified snapshot. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> name = client.snapshot_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]', '[SNAPSHOT]') - >>> - >>> response = client.get_snapshot(name) - - Args: - name (str): Required. The unique name of the requested snapshot. Values are of - the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. 
If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Snapshot` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "get_snapshot" not in self._inner_api_calls: - self._inner_api_calls[ - "get_snapshot" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_snapshot, - default_retry=self._method_configs["GetSnapshot"].retry, - default_timeout=self._method_configs["GetSnapshot"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.GetSnapshotRequest(name=name,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_snapshot"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_snapshots( - self, - parent, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists all snapshots associated with the specified cluster. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. 
This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> parent = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') - >>> - >>> # Iterate over all results - >>> for element in client.list_snapshots(parent): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_snapshots(parent).pages: - ... for element in page: - ... # process element - ... pass - - Args: - parent (str): Required. The unique name of the cluster for which snapshots should - be listed. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. Use - ``{cluster} = '-'`` to list snapshots for all clusters in an instance, - e.g., ``projects/{project}/instances/{instance}/clusters/-``. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.bigtable_admin_v2.types.Snapshot` instances. 
- You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "list_snapshots" not in self._inner_api_calls: - self._inner_api_calls[ - "list_snapshots" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_snapshots, - default_retry=self._method_configs["ListSnapshots"].retry, - default_timeout=self._method_configs["ListSnapshots"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.ListSnapshotsRequest( - parent=parent, page_size=page_size, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_snapshots"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="snapshots", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def delete_snapshot( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Permanently deletes the specified snapshot. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. 
It is not subject to any SLA or deprecation - policy. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> name = client.snapshot_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]', '[SNAPSHOT]') - >>> - >>> client.delete_snapshot(name) - - Args: - name (str): Required. The unique name of the snapshot to be deleted. Values are - of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "delete_snapshot" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_snapshot" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_snapshot, - default_retry=self._method_configs["DeleteSnapshot"].retry, - default_timeout=self._method_configs["DeleteSnapshot"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.DeleteSnapshotRequest(name=name,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["delete_snapshot"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def get_backup( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets metadata on a pending or completed Cloud Bigtable Backup. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> name = client.backup_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]', '[BACKUP]') - >>> - >>> response = client.get_backup(name) - - Args: - name (str): Required. Name of the backup. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. 
- - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Backup` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "get_backup" not in self._inner_api_calls: - self._inner_api_calls[ - "get_backup" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_backup, - default_retry=self._method_configs["GetBackup"].retry, - default_timeout=self._method_configs["GetBackup"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.GetBackupRequest(name=name,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_backup"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def update_backup( - self, - backup, - update_mask, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates a pending or completed Cloud Bigtable Backup. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> # TODO: Initialize `backup`: - >>> backup = {} - >>> - >>> # TODO: Initialize `update_mask`: - >>> update_mask = {} - >>> - >>> response = client.update_backup(backup, update_mask) - - Args: - backup (Union[dict, ~google.cloud.bigtable_admin_v2.types.Backup]): Required. The backup to update. ``backup.name``, and the fields to - be updated as specified by ``update_mask`` are required. Other fields - are ignored. 
Update is only supported for the following fields: - - - ``backup.expire_time``. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Backup` - update_mask (Union[dict, ~google.cloud.bigtable_admin_v2.types.FieldMask]): Required. A mask specifying which fields (e.g. ``expire_time``) in - the Backup resource should be updated. This mask is relative to the - Backup resource, not to the request message. The field mask must always - be specified; this prevents any future fields from being erased - accidentally by clients that do not know about them. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.FieldMask` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Backup` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "update_backup" not in self._inner_api_calls: - self._inner_api_calls[ - "update_backup" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_backup, - default_retry=self._method_configs["UpdateBackup"].retry, - default_timeout=self._method_configs["UpdateBackup"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.UpdateBackupRequest( - backup=backup, update_mask=update_mask, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("backup.name", backup.name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["update_backup"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def delete_backup( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Deletes a pending or completed Cloud Bigtable backup. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> name = client.backup_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]', '[BACKUP]') - >>> - >>> client.delete_backup(name) - - Args: - name (str): Required. Name of the backup to delete. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. 
- - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "delete_backup" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_backup" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_backup, - default_retry=self._method_configs["DeleteBackup"].retry, - default_timeout=self._method_configs["DeleteBackup"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.DeleteBackupRequest(name=name,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["delete_backup"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_backups( - self, - parent, - filter_=None, - order_by=None, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists Cloud Bigtable backups. Returns both completed and pending - backups. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> parent = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') - >>> - >>> # Iterate over all results - >>> for element in client.list_backups(parent): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_backups(parent).pages: - ... for element in page: - ... # process element - ... 
pass - - Args: - parent (str): Required. The cluster to list backups from. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. Use - ``{cluster} = '-'`` to list backups for all clusters in an instance, - e.g., ``projects/{project}/instances/{instance}/clusters/-``. - filter_ (str): A filter expression that filters backups listed in the response. The - expression must specify the field name, a comparison operator, and the - value that you want to use for filtering. The value must be a string, a - number, or a boolean. The comparison operator must be <, >, <=, >=, !=, - =, or :. Colon ':' represents a HAS operator which is roughly synonymous - with equality. Filter rules are case insensitive. - - The fields eligible for filtering are: - - - ``name`` - - ``source_table`` - - ``state`` - - ``start_time`` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) - - ``end_time`` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) - - ``expire_time`` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) - - ``size_bytes`` - - To filter on multiple expressions, provide each separate expression - within parentheses. By default, each expression is an AND expression. - However, you can include AND, OR, and NOT expressions explicitly. - - Some examples of using filters are: - - - ``name:"exact"`` --> The backup's name is the string "exact". - - ``name:howl`` --> The backup's name contains the string "howl". - - ``source_table:prod`` --> The source_table's name contains the string - "prod". - - ``state:CREATING`` --> The backup is pending creation. - - ``state:READY`` --> The backup is fully created and ready for use. - - ``(name:howl) AND (start_time < \"2018-03-28T14:50:00Z\")`` --> The - backup name contains the string "howl" and start_time of the backup - is before 2018-03-28T14:50:00Z. 
- - ``size_bytes > 10000000000`` --> The backup's size is greater than - 10GB - order_by (str): An expression for specifying the sort order of the results of the - request. The string value should specify one or more fields in - ``Backup``. The full syntax is described at - https://aip.dev/132#ordering. - - Fields supported are: \* name \* source_table \* expire_time \* - start_time \* end_time \* size_bytes \* state - - For example, "start_time". The default sorting order is ascending. To - specify descending order for the field, a suffix " desc" should be - appended to the field name. For example, "start_time desc". Redundant - space characters in the syntax are insigificant. - - If order_by is empty, results will be sorted by ``start_time`` in - descending order starting from the most recently created backup. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.bigtable_admin_v2.types.Backup` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. 
- google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "list_backups" not in self._inner_api_calls: - self._inner_api_calls[ - "list_backups" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_backups, - default_retry=self._method_configs["ListBackups"].retry, - default_timeout=self._method_configs["ListBackups"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.ListBackupsRequest( - parent=parent, filter=filter_, order_by=order_by, page_size=page_size, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_backups"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="backups", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def get_iam_policy( - self, - resource, - options_=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets the access control policy for a Table or Backup resource. - Returns an empty policy if the resource exists but does not have a policy - set. 
- - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> # TODO: Initialize `resource`: - >>> resource = '' - >>> - >>> response = client.get_iam_policy(resource) - - Args: - resource (str): REQUIRED: The resource for which the policy is being requested. - See the operation documentation for the appropriate value for this field. - options_ (Union[dict, ~google.cloud.bigtable_admin_v2.types.GetPolicyOptions]): OPTIONAL: A ``GetPolicyOptions`` object for specifying options to - ``GetIamPolicy``. This field is only used by Cloud IAM. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.GetPolicyOptions` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Policy` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "get_iam_policy" not in self._inner_api_calls: - self._inner_api_calls[ - "get_iam_policy" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_iam_policy, - default_retry=self._method_configs["GetIamPolicy"].retry, - default_timeout=self._method_configs["GetIamPolicy"].timeout, - client_info=self._client_info, - ) - - request = iam_policy_pb2.GetIamPolicyRequest( - resource=resource, options=options_, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("resource", resource)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_iam_policy"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def set_iam_policy( - self, - resource, - policy, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Sets the access control policy on a Table or Backup resource. - Replaces any existing policy. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> # TODO: Initialize `resource`: - >>> resource = '' - >>> - >>> # TODO: Initialize `policy`: - >>> policy = {} - >>> - >>> response = client.set_iam_policy(resource, policy) - - Args: - resource (str): REQUIRED: The resource for which the policy is being specified. - See the operation documentation for the appropriate value for this field. - policy (Union[dict, ~google.cloud.bigtable_admin_v2.types.Policy]): REQUIRED: The complete policy to be applied to the ``resource``. The - size of the policy is limited to a few 10s of KB. An empty policy is a - valid policy but certain Cloud Platform services (such as Projects) - might reject them. 
- - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Policy` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Policy` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "set_iam_policy" not in self._inner_api_calls: - self._inner_api_calls[ - "set_iam_policy" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.set_iam_policy, - default_retry=self._method_configs["SetIamPolicy"].retry, - default_timeout=self._method_configs["SetIamPolicy"].timeout, - client_info=self._client_info, - ) - - request = iam_policy_pb2.SetIamPolicyRequest(resource=resource, policy=policy,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("resource", resource)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["set_iam_policy"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def test_iam_permissions( - self, - resource, - permissions, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Returns permissions that the caller has on the specified Table or Backup resource. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> # TODO: Initialize `resource`: - >>> resource = '' - >>> - >>> # TODO: Initialize `permissions`: - >>> permissions = [] - >>> - >>> response = client.test_iam_permissions(resource, permissions) - - Args: - resource (str): REQUIRED: The resource for which the policy detail is being requested. - See the operation documentation for the appropriate value for this field. - permissions (list[str]): The set of permissions to check for the ``resource``. Permissions - with wildcards (such as '*' or 'storage.*') are not allowed. For more - information see `IAM - Overview `__. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. 
- timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.TestIamPermissionsResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "test_iam_permissions" not in self._inner_api_calls: - self._inner_api_calls[ - "test_iam_permissions" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.test_iam_permissions, - default_retry=self._method_configs["TestIamPermissions"].retry, - default_timeout=self._method_configs["TestIamPermissions"].timeout, - client_info=self._client_info, - ) - - request = iam_policy_pb2.TestIamPermissionsRequest( - resource=resource, permissions=permissions, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("resource", resource)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["test_iam_permissions"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) diff --git a/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py b/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py deleted file mode 100644 index 85cc354a4..000000000 --- a/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py +++ /dev/null @@ -1,133 +0,0 @@ -config = { - "interfaces": { - 
"google.bigtable.admin.v2.BigtableTableAdmin": { - "retry_codes": { - "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], - "non_idempotent": [], - }, - "retry_params": { - "default": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 20000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 20000, - "total_timeout_millis": 600000, - } - }, - "methods": { - "CreateTableFromSnapshot": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "SnapshotTable": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "CreateBackup": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "RestoreTable": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "CreateTable": { - "timeout_millis": 130000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "ListTables": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "GetTable": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "DeleteTable": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "ModifyColumnFamilies": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "DropRowRange": { - "timeout_millis": 900000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "GenerateConsistencyToken": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "CheckConsistency": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - 
"GetSnapshot": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "ListSnapshots": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "DeleteSnapshot": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "GetBackup": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "UpdateBackup": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "DeleteBackup": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "ListBackups": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "GetIamPolicy": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "SetIamPolicy": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "TestIamPermissions": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - }, - } - } -} diff --git a/google/cloud/bigtable_admin_v2/gapic/enums.py b/google/cloud/bigtable_admin_v2/gapic/enums.py deleted file mode 100644 index c71bee34b..000000000 --- a/google/cloud/bigtable_admin_v2/gapic/enums.py +++ /dev/null @@ -1,213 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Wrappers for protocol buffer enum types.""" - -import enum - - -class RestoreSourceType(enum.IntEnum): - """ - Indicates the type of the restore source. - - Attributes: - RESTORE_SOURCE_TYPE_UNSPECIFIED (int): No restore associated. - BACKUP (int): A backup was used as the source of the restore. - """ - - RESTORE_SOURCE_TYPE_UNSPECIFIED = 0 - BACKUP = 1 - - -class StorageType(enum.IntEnum): - """ - Storage media types for persisting Bigtable data. - - Attributes: - STORAGE_TYPE_UNSPECIFIED (int): The user did not specify a storage type. - SSD (int): Flash (SSD) storage should be used. - HDD (int): Magnetic drive (HDD) storage should be used. - """ - - STORAGE_TYPE_UNSPECIFIED = 0 - SSD = 1 - HDD = 2 - - -class Backup(object): - class State(enum.IntEnum): - """ - Indicates the current state of the backup. - - Attributes: - STATE_UNSPECIFIED (int): Not specified. - CREATING (int): The pending backup is still being created. Operations on the backup - may fail with ``FAILED_PRECONDITION`` in this state. - READY (int): The backup is complete and ready for use. - """ - - STATE_UNSPECIFIED = 0 - CREATING = 1 - READY = 2 - - -class Cluster(object): - class State(enum.IntEnum): - """ - Possible states of a cluster. - - Attributes: - STATE_NOT_KNOWN (int): The state of the cluster could not be determined. - READY (int): The cluster has been successfully created and is ready to serve requests. - CREATING (int): The cluster is currently being created, and may be destroyed - if the creation process encounters an error. 
- A cluster may not be able to serve requests while being created. - RESIZING (int): The cluster is currently being resized, and may revert to its previous - node count if the process encounters an error. - A cluster is still capable of serving requests while being resized, - but may exhibit performance as if its number of allocated nodes is - between the starting and requested states. - DISABLED (int): The cluster has no backing nodes. The data (tables) still - exist, but no operations can be performed on the cluster. - """ - - STATE_NOT_KNOWN = 0 - READY = 1 - CREATING = 2 - RESIZING = 3 - DISABLED = 4 - - -class Instance(object): - class State(enum.IntEnum): - """ - Possible states of an instance. - - Attributes: - STATE_NOT_KNOWN (int): The state of the instance could not be determined. - READY (int): The instance has been successfully created and can serve requests - to its tables. - CREATING (int): The instance is currently being created, and may be destroyed - if the creation process encounters an error. - """ - - STATE_NOT_KNOWN = 0 - READY = 1 - CREATING = 2 - - class Type(enum.IntEnum): - """ - The type of the instance. - - Attributes: - TYPE_UNSPECIFIED (int): The type of the instance is unspecified. If set when creating an - instance, a ``PRODUCTION`` instance will be created. If set when - updating an instance, the type will be left unchanged. - PRODUCTION (int): An instance meant for production use. ``serve_nodes`` must be set on - the cluster. - DEVELOPMENT (int): The instance is meant for development and testing purposes only; it - has no performance or uptime guarantees and is not covered by SLA. After - a development instance is created, it can be upgraded by updating the - instance to type ``PRODUCTION``. An instance created as a production - instance cannot be changed to a development instance. When creating a - development instance, ``serve_nodes`` on the cluster must not be set. 
- """ - - TYPE_UNSPECIFIED = 0 - PRODUCTION = 1 - DEVELOPMENT = 2 - - -class Snapshot(object): - class State(enum.IntEnum): - """ - Possible states of a snapshot. - - Attributes: - STATE_NOT_KNOWN (int): The state of the snapshot could not be determined. - READY (int): The snapshot has been successfully created and can serve all requests. - CREATING (int): The snapshot is currently being created, and may be destroyed if the - creation process encounters an error. A snapshot may not be restored to a - table while it is being created. - """ - - STATE_NOT_KNOWN = 0 - READY = 1 - CREATING = 2 - - -class Table(object): - class TimestampGranularity(enum.IntEnum): - """ - Possible timestamp granularities to use when keeping multiple versions - of data in a table. - - Attributes: - TIMESTAMP_GRANULARITY_UNSPECIFIED (int): The user did not specify a granularity. Should not be returned. - When specified during table creation, MILLIS will be used. - MILLIS (int): The table keeps data versioned at a granularity of 1ms. - """ - - TIMESTAMP_GRANULARITY_UNSPECIFIED = 0 - MILLIS = 1 - - class View(enum.IntEnum): - """ - Defines a view over a table's fields. - - Attributes: - VIEW_UNSPECIFIED (int): Uses the default view for each method as documented in its request. - NAME_ONLY (int): Only populates ``name``. - SCHEMA_VIEW (int): Only populates ``name`` and fields related to the table's schema. - REPLICATION_VIEW (int): Only populates ``name`` and fields related to the table's - replication state. - FULL (int): Populates all fields. - """ - - VIEW_UNSPECIFIED = 0 - NAME_ONLY = 1 - SCHEMA_VIEW = 2 - REPLICATION_VIEW = 3 - FULL = 4 - - class ClusterState(object): - class ReplicationState(enum.IntEnum): - """ - Table replication states. - - Attributes: - STATE_NOT_KNOWN (int): The replication state of the table is unknown in this cluster. 
- INITIALIZING (int): The cluster was recently created, and the table must finish copying - over pre-existing data from other clusters before it can begin - receiving live replication updates and serving Data API requests. - PLANNED_MAINTENANCE (int): The table is temporarily unable to serve Data API requests from this - cluster due to planned internal maintenance. - UNPLANNED_MAINTENANCE (int): The table is temporarily unable to serve Data API requests from this - cluster due to unplanned or emergency maintenance. - READY (int): The table can serve Data API requests from this cluster. Depending on - replication delay, reads may not immediately reflect the state of the - table in other clusters. - READY_OPTIMIZING (int): The table is fully created and ready for use after a restore, and is - being optimized for performance. When optimizations are complete, the - table will transition to ``READY`` state. - """ - - STATE_NOT_KNOWN = 0 - INITIALIZING = 1 - PLANNED_MAINTENANCE = 2 - UNPLANNED_MAINTENANCE = 3 - READY = 4 - READY_OPTIMIZING = 5 diff --git a/google/cloud/bigtable_admin_v2/gapic/transports/__init__.py b/google/cloud/bigtable_admin_v2/gapic/transports/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py b/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py deleted file mode 100644 index 7ca758edc..000000000 --- a/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py +++ /dev/null @@ -1,380 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import google.api_core.grpc_helpers -import google.api_core.operations_v1 - -from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2_grpc - - -class BigtableInstanceAdminGrpcTransport(object): - """gRPC transport class providing stubs for - google.bigtable.admin.v2 BigtableInstanceAdmin API. - - The transport provides access to the raw gRPC stubs, - which can be used to take advantage of advanced - features of gRPC. - """ - - # The scopes needed to make gRPC calls to all of the methods defined - # in this service. - _OAUTH_SCOPES = ( - "https://www.googleapis.com/auth/bigtable.admin", - "https://www.googleapis.com/auth/bigtable.admin.cluster", - "https://www.googleapis.com/auth/bigtable.admin.instance", - "https://www.googleapis.com/auth/bigtable.admin.table", - "https://www.googleapis.com/auth/cloud-bigtable.admin", - "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", - "https://www.googleapis.com/auth/cloud-bigtable.admin.table", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - ) - - def __init__( - self, channel=None, credentials=None, address="bigtableadmin.googleapis.com:443" - ): - """Instantiate the transport class. - - Args: - channel (grpc.Channel): A ``Channel`` instance through - which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. 
These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - address (str): The address where the service is hosted. - """ - # If both `channel` and `credentials` are specified, raise an - # exception (channels come with credentials baked in already). - if channel is not None and credentials is not None: - raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive.", - ) - - # Create the channel. - if channel is None: - channel = self.create_channel( - address=address, - credentials=credentials, - options={ - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - }.items(), - ) - - self._channel = channel - - # gRPC uses objects called "stubs" that are bound to the - # channel and provide a basic method for each RPC. - self._stubs = { - "bigtable_instance_admin_stub": bigtable_instance_admin_pb2_grpc.BigtableInstanceAdminStub( - channel - ), - } - - # Because this API includes a method that returns a - # long-running operation (proto: google.longrunning.Operation), - # instantiate an LRO client. - self._operations_client = google.api_core.operations_v1.OperationsClient( - channel - ) - - @classmethod - def create_channel( - cls, address="bigtableadmin.googleapis.com:443", credentials=None, **kwargs - ): - """Create and return a gRPC channel object. - - Args: - address (str): The host for the channel to use. - credentials (~.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - kwargs (dict): Keyword arguments, which are passed to the - channel creation. - - Returns: - grpc.Channel: A gRPC channel object. 
- """ - return google.api_core.grpc_helpers.create_channel( - address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs - ) - - @property - def channel(self): - """The gRPC channel used by the transport. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return self._channel - - @property - def create_instance(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.create_instance`. - - Create an instance within a project. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].CreateInstance - - @property - def partial_update_instance(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.partial_update_instance`. - - Partially updates an instance within a project. This method can modify all - fields of an Instance and is the preferred way to update an Instance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].PartialUpdateInstance - - @property - def create_cluster(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.create_cluster`. - - Creates a cluster within an instance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].CreateCluster - - @property - def update_cluster(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.update_cluster`. - - Updates a cluster within an instance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. 
- """ - return self._stubs["bigtable_instance_admin_stub"].UpdateCluster - - @property - def update_app_profile(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.update_app_profile`. - - Updates an app profile within an instance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].UpdateAppProfile - - @property - def get_instance(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.get_instance`. - - Gets information about an instance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].GetInstance - - @property - def list_instances(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.list_instances`. - - Lists information about instances in a project. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].ListInstances - - @property - def update_instance(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.update_instance`. - - Updates an instance within a project. This method updates only the display - name and type for an Instance. To update other Instance properties, such as - labels, use PartialUpdateInstance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].UpdateInstance - - @property - def delete_instance(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.delete_instance`. - - Delete an instance from a project. 
- - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].DeleteInstance - - @property - def get_cluster(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.get_cluster`. - - Gets information about a cluster. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].GetCluster - - @property - def list_clusters(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.list_clusters`. - - Lists information about clusters in an instance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].ListClusters - - @property - def delete_cluster(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.delete_cluster`. - - Deletes a cluster from an instance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].DeleteCluster - - @property - def create_app_profile(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.create_app_profile`. - - Creates an app profile within an instance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].CreateAppProfile - - @property - def get_app_profile(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.get_app_profile`. - - Gets information about an app profile. 
- - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].GetAppProfile - - @property - def list_app_profiles(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.list_app_profiles`. - - Lists information about app profiles in an instance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].ListAppProfiles - - @property - def delete_app_profile(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.delete_app_profile`. - - Deletes an app profile from an instance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].DeleteAppProfile - - @property - def get_iam_policy(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.get_iam_policy`. - - Gets the access control policy for an instance resource. Returns an empty - policy if an instance exists but does not have a policy set. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].GetIamPolicy - - @property - def set_iam_policy(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.set_iam_policy`. - - Sets the access control policy on an instance resource. Replaces any - existing policy. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. 
- """ - return self._stubs["bigtable_instance_admin_stub"].SetIamPolicy - - @property - def test_iam_permissions(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.test_iam_permissions`. - - Returns permissions that the caller has on the specified instance resource. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].TestIamPermissions diff --git a/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py b/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py deleted file mode 100644 index 68bee69ae..000000000 --- a/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py +++ /dev/null @@ -1,471 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import google.api_core.grpc_helpers -import google.api_core.operations_v1 - -from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2_grpc - - -class BigtableTableAdminGrpcTransport(object): - """gRPC transport class providing stubs for - google.bigtable.admin.v2 BigtableTableAdmin API. - - The transport provides access to the raw gRPC stubs, - which can be used to take advantage of advanced - features of gRPC. 
- """ - - # The scopes needed to make gRPC calls to all of the methods defined - # in this service. - _OAUTH_SCOPES = ( - "https://www.googleapis.com/auth/bigtable.admin", - "https://www.googleapis.com/auth/bigtable.admin.cluster", - "https://www.googleapis.com/auth/bigtable.admin.instance", - "https://www.googleapis.com/auth/bigtable.admin.table", - "https://www.googleapis.com/auth/cloud-bigtable.admin", - "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", - "https://www.googleapis.com/auth/cloud-bigtable.admin.table", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - ) - - def __init__( - self, channel=None, credentials=None, address="bigtableadmin.googleapis.com:443" - ): - """Instantiate the transport class. - - Args: - channel (grpc.Channel): A ``Channel`` instance through - which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - address (str): The address where the service is hosted. - """ - # If both `channel` and `credentials` are specified, raise an - # exception (channels come with credentials baked in already). - if channel is not None and credentials is not None: - raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive.", - ) - - # Create the channel. - if channel is None: - channel = self.create_channel( - address=address, - credentials=credentials, - options={ - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - }.items(), - ) - - self._channel = channel - - # gRPC uses objects called "stubs" that are bound to the - # channel and provide a basic method for each RPC. 
- self._stubs = { - "bigtable_table_admin_stub": bigtable_table_admin_pb2_grpc.BigtableTableAdminStub( - channel - ), - } - - # Because this API includes a method that returns a - # long-running operation (proto: google.longrunning.Operation), - # instantiate an LRO client. - self._operations_client = google.api_core.operations_v1.OperationsClient( - channel - ) - - @classmethod - def create_channel( - cls, address="bigtableadmin.googleapis.com:443", credentials=None, **kwargs - ): - """Create and return a gRPC channel object. - - Args: - address (str): The host for the channel to use. - credentials (~.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - kwargs (dict): Keyword arguments, which are passed to the - channel creation. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return google.api_core.grpc_helpers.create_channel( - address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs - ) - - @property - def channel(self): - """The gRPC channel used by the transport. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return self._channel - - @property - def create_table_from_snapshot(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.create_table_from_snapshot`. - - Creates a new table from the specified snapshot. The target table must - not exist. The snapshot and the table must be in the same instance. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. 
- - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].CreateTableFromSnapshot - - @property - def snapshot_table(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.snapshot_table`. - - Creates a new snapshot in the specified cluster from the specified - source table. The cluster and the table must be in the same instance. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].SnapshotTable - - @property - def create_backup(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.create_backup`. - - Starts creating a new Cloud Bigtable Backup. The returned backup - ``long-running operation`` can be used to track creation of the backup. - The ``metadata`` field type is ``CreateBackupMetadata``. The - ``response`` field type is ``Backup``, if successful. Cancelling the - returned operation will stop the creation and delete the backup. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].CreateBackup - - @property - def restore_table(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.restore_table`. - - Create a new table by restoring from a completed backup. The new - table must be in the same instance as the instance containing the - backup. 
The returned table ``long-running operation`` can be used to - track the progress of the operation, and to cancel it. The ``metadata`` - field type is ``RestoreTableMetadata``. The ``response`` type is - ``Table``, if successful. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].RestoreTable - - @property - def create_table(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.create_table`. - - Creates a new table in the specified instance. - The table can be created with a full set of initial column families, - specified in the request. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].CreateTable - - @property - def list_tables(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.list_tables`. - - Lists all tables served from a specified instance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].ListTables - - @property - def get_table(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.get_table`. - - Gets metadata information about the specified table. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].GetTable - - @property - def delete_table(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.delete_table`. - - Permanently deletes a specified table and all of its data. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. 
- """ - return self._stubs["bigtable_table_admin_stub"].DeleteTable - - @property - def modify_column_families(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.modify_column_families`. - - Performs a series of column family modifications on the specified table. - Either all or none of the modifications will occur before this method - returns, but data requests received prior to that point may see a table - where only some modifications have taken effect. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].ModifyColumnFamilies - - @property - def drop_row_range(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.drop_row_range`. - - Permanently drop/delete a row range from a specified table. The request can - specify whether to delete all rows in a table, or only those that match a - particular prefix. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].DropRowRange - - @property - def generate_consistency_token(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.generate_consistency_token`. - - Generates a consistency token for a Table, which can be used in - CheckConsistency to check whether mutations to the table that finished - before this call started have been replicated. The tokens will be available - for 90 days. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].GenerateConsistencyToken - - @property - def check_consistency(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.check_consistency`. 
- - Checks replication consistency based on a consistency token, that is, if - replication has caught up based on the conditions specified in the token - and the check request. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].CheckConsistency - - @property - def get_snapshot(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.get_snapshot`. - - Gets metadata information about the specified snapshot. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].GetSnapshot - - @property - def list_snapshots(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.list_snapshots`. - - Lists all snapshots associated with the specified cluster. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].ListSnapshots - - @property - def delete_snapshot(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.delete_snapshot`. - - Permanently deletes the specified snapshot. 
- - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].DeleteSnapshot - - @property - def get_backup(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.get_backup`. - - Gets metadata on a pending or completed Cloud Bigtable Backup. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].GetBackup - - @property - def update_backup(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.update_backup`. - - Updates a pending or completed Cloud Bigtable Backup. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].UpdateBackup - - @property - def delete_backup(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.delete_backup`. - - Deletes a pending or completed Cloud Bigtable backup. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].DeleteBackup - - @property - def list_backups(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.list_backups`. - - Lists Cloud Bigtable backups. Returns both completed and pending - backups. 
- - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].ListBackups - - @property - def get_iam_policy(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.get_iam_policy`. - - Gets the access control policy for a Table or Backup resource. - Returns an empty policy if the resource exists but does not have a policy - set. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].GetIamPolicy - - @property - def set_iam_policy(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.set_iam_policy`. - - Sets the access control policy on a Table or Backup resource. - Replaces any existing policy. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].SetIamPolicy - - @property - def test_iam_permissions(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.test_iam_permissions`. - - Returns permissions that the caller has on the specified Table or Backup resource. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. 
- """ - return self._stubs["bigtable_table_admin_stub"].TestIamPermissions diff --git a/google/cloud/bigtable_admin_v2/proto/__init__.py b/google/cloud/bigtable_admin_v2/proto/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py b/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py deleted file mode 100644 index 3f4c9e3cc..000000000 --- a/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py +++ /dev/null @@ -1,2434 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto -"""Generated protocol buffer code.""" -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.api import client_pb2 as google_dot_api_dot_client__pb2 -from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.cloud.bigtable_admin_v2.proto import ( - instance_pb2 as google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2, -) -from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2 -from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2 -from google.longrunning import ( - operations_pb2 as google_dot_longrunning_dot_operations__pb2, -) -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -from google.protobuf import field_mask_pb2 as google_dot_protobuf_dot_field__mask__pb2 -from google.protobuf import timestamp_pb2 as 
google_dot_protobuf_dot_timestamp__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto", - package="google.bigtable.admin.v2", - syntax="proto3", - serialized_options=b'\n\034com.google.bigtable.admin.v2B\032BigtableInstanceAdminProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2\352\002"Google::Cloud::Bigtable::Admin::V2', - create_key=_descriptor._internal_create_key, - serialized_pb=b'\nBgoogle/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x33google/cloud/bigtable_admin_v2/proto/instance.proto\x1a\x1egoogle/iam/v1/iam_policy.proto\x1a\x1agoogle/iam/v1/policy.proto\x1a#google/longrunning/operations.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xdb\x02\n\x15\x43reateInstanceRequest\x12\x43\n\x06parent\x18\x01 \x01(\tB3\xe0\x41\x02\xfa\x41-\n+cloudresourcemanager.googleapis.com/Project\x12\x18\n\x0binstance_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x39\n\x08instance\x18\x03 \x01(\x0b\x32".google.bigtable.admin.v2.InstanceB\x03\xe0\x41\x02\x12T\n\x08\x63lusters\x18\x04 \x03(\x0b\x32=.google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntryB\x03\xe0\x41\x02\x1aR\n\rClustersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x30\n\x05value\x18\x02 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster:\x02\x38\x01"L\n\x12GetInstanceRequest\x12\x36\n\x04name\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance"o\n\x14ListInstancesRequest\x12\x43\n\x06parent\x18\x01 \x01(\tB3\xe0\x41\x02\xfa\x41-\n+cloudresourcemanager.googleapis.com/Project\x12\x12\n\npage_token\x18\x02 
\x01(\t"\x81\x01\n\x15ListInstancesResponse\x12\x35\n\tinstances\x18\x01 \x03(\x0b\x32".google.bigtable.admin.v2.Instance\x12\x18\n\x10\x66\x61iled_locations\x18\x02 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t"\x8f\x01\n\x1cPartialUpdateInstanceRequest\x12\x39\n\x08instance\x18\x01 \x01(\x0b\x32".google.bigtable.admin.v2.InstanceB\x03\xe0\x41\x02\x12\x34\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02"O\n\x15\x44\x65leteInstanceRequest\x12\x36\n\x04name\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance"\xa2\x01\n\x14\x43reateClusterRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x17\n\ncluster_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x37\n\x07\x63luster\x18\x03 \x01(\x0b\x32!.google.bigtable.admin.v2.ClusterB\x03\xe0\x41\x02"J\n\x11GetClusterRequest\x12\x35\n\x04name\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster"c\n\x13ListClustersRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x12\n\npage_token\x18\x02 \x01(\t"~\n\x14ListClustersResponse\x12\x33\n\x08\x63lusters\x18\x01 \x03(\x0b\x32!.google.bigtable.admin.v2.Cluster\x12\x18\n\x10\x66\x61iled_locations\x18\x02 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t"M\n\x14\x44\x65leteClusterRequest\x12\x35\n\x04name\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster"\xc6\x01\n\x16\x43reateInstanceMetadata\x12I\n\x10original_request\x18\x01 \x01(\x0b\x32/.google.bigtable.admin.v2.CreateInstanceRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xcd\x01\n\x16UpdateInstanceMetadata\x12P\n\x10original_request\x18\x01 \x01(\x0b\x32\x36.google.bigtable.admin.v2.PartialUpdateInstanceRequest\x12\x30\n\x0crequest_time\x18\x02 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xc4\x01\n\x15\x43reateClusterMetadata\x12H\n\x10original_request\x18\x01 \x01(\x0b\x32..google.bigtable.admin.v2.CreateClusterRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xb7\x01\n\x15UpdateClusterMetadata\x12;\n\x10original_request\x18\x01 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xc9\x01\n\x17\x43reateAppProfileRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x1b\n\x0e\x61pp_profile_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12>\n\x0b\x61pp_profile\x18\x03 \x01(\x0b\x32$.google.bigtable.admin.v2.AppProfileB\x03\xe0\x41\x02\x12\x17\n\x0fignore_warnings\x18\x04 \x01(\x08"P\n\x14GetAppProfileRequest\x12\x38\n\x04name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n"bigtable.googleapis.com/AppProfile"y\n\x16ListAppProfilesRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x11\n\tpage_size\x18\x03 \x01(\x05\x12\x12\n\npage_token\x18\x02 \x01(\t"\x88\x01\n\x17ListAppProfilesResponse\x12:\n\x0c\x61pp_profiles\x18\x01 \x03(\x0b\x32$.google.bigtable.admin.v2.AppProfile\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\x12\x18\n\x10\x66\x61iled_locations\x18\x03 \x03(\t"\xa8\x01\n\x17UpdateAppProfileRequest\x12>\n\x0b\x61pp_profile\x18\x01 \x01(\x0b\x32$.google.bigtable.admin.v2.AppProfileB\x03\xe0\x41\x02\x12\x34\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02\x12\x17\n\x0fignore_warnings\x18\x03 \x01(\x08"q\n\x17\x44\x65leteAppProfileRequest\x12\x38\n\x04name\x18\x01 
\x01(\tB*\xe0\x41\x02\xfa\x41$\n"bigtable.googleapis.com/AppProfile\x12\x1c\n\x0fignore_warnings\x18\x02 \x01(\x08\x42\x03\xe0\x41\x02"\x1a\n\x18UpdateAppProfileMetadata2\x92\x1e\n\x15\x42igtableInstanceAdmin\x12\xda\x01\n\x0e\x43reateInstance\x12/.google.bigtable.admin.v2.CreateInstanceRequest\x1a\x1d.google.longrunning.Operation"x\x82\xd3\xe4\x93\x02&"!/v2/{parent=projects/*}/instances:\x01*\xda\x41$parent,instance_id,instance,clusters\xca\x41"\n\x08Instance\x12\x16\x43reateInstanceMetadata\x12\x91\x01\n\x0bGetInstance\x12,.google.bigtable.admin.v2.GetInstanceRequest\x1a".google.bigtable.admin.v2.Instance"0\x82\xd3\xe4\x93\x02#\x12!/v2/{name=projects/*/instances/*}\xda\x41\x04name\x12\xa4\x01\n\rListInstances\x12..google.bigtable.admin.v2.ListInstancesRequest\x1a/.google.bigtable.admin.v2.ListInstancesResponse"2\x82\xd3\xe4\x93\x02#\x12!/v2/{parent=projects/*}/instances\xda\x41\x06parent\x12\x86\x01\n\x0eUpdateInstance\x12".google.bigtable.admin.v2.Instance\x1a".google.bigtable.admin.v2.Instance",\x82\xd3\xe4\x93\x02&\x1a!/v2/{name=projects/*/instances/*}:\x01*\x12\xe8\x01\n\x15PartialUpdateInstance\x12\x36.google.bigtable.admin.v2.PartialUpdateInstanceRequest\x1a\x1d.google.longrunning.Operation"x\x82\xd3\xe4\x93\x02\x36\x32*/v2/{instance.name=projects/*/instances/*}:\x08instance\xda\x41\x14instance,update_mask\xca\x41"\n\x08Instance\x12\x16UpdateInstanceMetadata\x12\x8b\x01\n\x0e\x44\x65leteInstance\x12/.google.bigtable.admin.v2.DeleteInstanceRequest\x1a\x16.google.protobuf.Empty"0\x82\xd3\xe4\x93\x02#*!/v2/{name=projects/*/instances/*}\xda\x41\x04name\x12\xdc\x01\n\rCreateCluster\x12..google.bigtable.admin.v2.CreateClusterRequest\x1a\x1d.google.longrunning.Operation"|\x82\xd3\xe4\x93\x02\x37",/v2/{parent=projects/*/instances/*}/clusters:\x07\x63luster\xda\x41\x19parent,cluster_id,cluster\xca\x41 
\n\x07\x43luster\x12\x15\x43reateClusterMetadata\x12\x99\x01\n\nGetCluster\x12+.google.bigtable.admin.v2.GetClusterRequest\x1a!.google.bigtable.admin.v2.Cluster";\x82\xd3\xe4\x93\x02.\x12,/v2/{name=projects/*/instances/*/clusters/*}\xda\x41\x04name\x12\xac\x01\n\x0cListClusters\x12-.google.bigtable.admin.v2.ListClustersRequest\x1a..google.bigtable.admin.v2.ListClustersResponse"=\x82\xd3\xe4\x93\x02.\x12,/v2/{parent=projects/*/instances/*}/clusters\xda\x41\x06parent\x12\xad\x01\n\rUpdateCluster\x12!.google.bigtable.admin.v2.Cluster\x1a\x1d.google.longrunning.Operation"Z\x82\xd3\xe4\x93\x02\x31\x1a,/v2/{name=projects/*/instances/*/clusters/*}:\x01*\xca\x41 \n\x07\x43luster\x12\x15UpdateClusterMetadata\x12\x94\x01\n\rDeleteCluster\x12..google.bigtable.admin.v2.DeleteClusterRequest\x1a\x16.google.protobuf.Empty";\x82\xd3\xe4\x93\x02.*,/v2/{name=projects/*/instances/*/clusters/*}\xda\x41\x04name\x12\xd5\x01\n\x10\x43reateAppProfile\x12\x31.google.bigtable.admin.v2.CreateAppProfileRequest\x1a$.google.bigtable.admin.v2.AppProfile"h\x82\xd3\xe4\x93\x02>"//v2/{parent=projects/*/instances/*}/appProfiles:\x0b\x61pp_profile\xda\x41!parent,app_profile_id,app_profile\x12\xa5\x01\n\rGetAppProfile\x12..google.bigtable.admin.v2.GetAppProfileRequest\x1a$.google.bigtable.admin.v2.AppProfile">\x82\xd3\xe4\x93\x02\x31\x12//v2/{name=projects/*/instances/*/appProfiles/*}\xda\x41\x04name\x12\xb8\x01\n\x0fListAppProfiles\x12\x30.google.bigtable.admin.v2.ListAppProfilesRequest\x1a\x31.google.bigtable.admin.v2.ListAppProfilesResponse"@\x82\xd3\xe4\x93\x02\x31\x12//v2/{parent=projects/*/instances/*}/appProfiles\xda\x41\x06parent\x12\xfa\x01\n\x10UpdateAppProfile\x12\x31.google.bigtable.admin.v2.UpdateAppProfileRequest\x1a\x1d.google.longrunning.Operation"\x93\x01\x82\xd3\xe4\x93\x02J2;/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}:\x0b\x61pp_profile\xda\x41\x17\x61pp_profile,update_mask\xca\x41&\n\nAppProfile\x12\x18UpdateAppProfileMetadata\x12\x9d\x01\n\x10\x44\x65leteAppProfile\
x12\x31.google.bigtable.admin.v2.DeleteAppProfileRequest\x1a\x16.google.protobuf.Empty">\x82\xd3\xe4\x93\x02\x31*//v2/{name=projects/*/instances/*/appProfiles/*}\xda\x41\x04name\x12\x93\x01\n\x0cGetIamPolicy\x12".google.iam.v1.GetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"H\x82\xd3\xe4\x93\x02\x37"2/v2/{resource=projects/*/instances/*}:getIamPolicy:\x01*\xda\x41\x08resource\x12\x9a\x01\n\x0cSetIamPolicy\x12".google.iam.v1.SetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"O\x82\xd3\xe4\x93\x02\x37"2/v2/{resource=projects/*/instances/*}:setIamPolicy:\x01*\xda\x41\x0fresource,policy\x12\xc5\x01\n\x12TestIamPermissions\x12(.google.iam.v1.TestIamPermissionsRequest\x1a).google.iam.v1.TestIamPermissionsResponse"Z\x82\xd3\xe4\x93\x02="8/v2/{resource=projects/*/instances/*}:testIamPermissions:\x01*\xda\x41\x14resource,permissions\x1a\x9a\x03\xca\x41\x1c\x62igtableadmin.googleapis.com\xd2\x41\xf7\x02https://www.googleapis.com/auth/bigtable.admin,https://www.googleapis.com/auth/bigtable.admin.cluster,https://www.googleapis.com/auth/bigtable.admin.instance,https://www.googleapis.com/auth/cloud-bigtable.admin,https://www.googleapis.com/auth/cloud-bigtable.admin.cluster,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-onlyB\xe2\x01\n\x1c\x63om.google.bigtable.admin.v2B\x1a\x42igtableInstanceAdminProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2\xea\x02"Google::Cloud::Bigtable::Admin::V2b\x06proto3', - dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - google_dot_api_dot_client__pb2.DESCRIPTOR, - google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, - google_dot_api_dot_resource__pb2.DESCRIPTOR, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.DESCRIPTOR, - google_dot_iam_dot_v1_dot_iam__policy__pb2.DESCRIPTOR, - google_dot_iam_dot_v1_dot_policy__pb2.DESCRIPTOR, - 
google_dot_longrunning_dot_operations__pb2.DESCRIPTOR, - google_dot_protobuf_dot_empty__pb2.DESCRIPTOR, - google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR, - google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - ], -) - - -_CREATEINSTANCEREQUEST_CLUSTERSENTRY = _descriptor.Descriptor( - name="ClustersEntry", - full_name="google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry.value", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=723, - serialized_end=805, -) - -_CREATEINSTANCEREQUEST = _descriptor.Descriptor( - name="CreateInstanceRequest", - full_name="google.bigtable.admin.v2.CreateInstanceRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - 
full_name="google.bigtable.admin.v2.CreateInstanceRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A-\n+cloudresourcemanager.googleapis.com/Project", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="instance_id", - full_name="google.bigtable.admin.v2.CreateInstanceRequest.instance_id", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="instance", - full_name="google.bigtable.admin.v2.CreateInstanceRequest.instance", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="clusters", - full_name="google.bigtable.admin.v2.CreateInstanceRequest.clusters", - index=3, - number=4, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_CREATEINSTANCEREQUEST_CLUSTERSENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - 
oneofs=[], - serialized_start=458, - serialized_end=805, -) - - -_GETINSTANCEREQUEST = _descriptor.Descriptor( - name="GetInstanceRequest", - full_name="google.bigtable.admin.v2.GetInstanceRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.GetInstanceRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Instance', - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=807, - serialized_end=883, -) - - -_LISTINSTANCESREQUEST = _descriptor.Descriptor( - name="ListInstancesRequest", - full_name="google.bigtable.admin.v2.ListInstancesRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.ListInstancesRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A-\n+cloudresourcemanager.googleapis.com/Project", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.bigtable.admin.v2.ListInstancesRequest.page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - 
default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=885, - serialized_end=996, -) - - -_LISTINSTANCESRESPONSE = _descriptor.Descriptor( - name="ListInstancesResponse", - full_name="google.bigtable.admin.v2.ListInstancesResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="instances", - full_name="google.bigtable.admin.v2.ListInstancesResponse.instances", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="failed_locations", - full_name="google.bigtable.admin.v2.ListInstancesResponse.failed_locations", - index=1, - number=2, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.bigtable.admin.v2.ListInstancesResponse.next_page_token", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, 
- file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=999, - serialized_end=1128, -) - - -_PARTIALUPDATEINSTANCEREQUEST = _descriptor.Descriptor( - name="PartialUpdateInstanceRequest", - full_name="google.bigtable.admin.v2.PartialUpdateInstanceRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="instance", - full_name="google.bigtable.admin.v2.PartialUpdateInstanceRequest.instance", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="update_mask", - full_name="google.bigtable.admin.v2.PartialUpdateInstanceRequest.update_mask", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1131, - serialized_end=1274, -) - - -_DELETEINSTANCEREQUEST = _descriptor.Descriptor( - name="DeleteInstanceRequest", - full_name="google.bigtable.admin.v2.DeleteInstanceRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - 
name="name", - full_name="google.bigtable.admin.v2.DeleteInstanceRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Instance', - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1276, - serialized_end=1355, -) - - -_CREATECLUSTERREQUEST = _descriptor.Descriptor( - name="CreateClusterRequest", - full_name="google.bigtable.admin.v2.CreateClusterRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.CreateClusterRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Instance', - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cluster_id", - full_name="google.bigtable.admin.v2.CreateClusterRequest.cluster_id", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cluster", - 
full_name="google.bigtable.admin.v2.CreateClusterRequest.cluster", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1358, - serialized_end=1520, -) - - -_GETCLUSTERREQUEST = _descriptor.Descriptor( - name="GetClusterRequest", - full_name="google.bigtable.admin.v2.GetClusterRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.GetClusterRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A!\n\037bigtable.googleapis.com/Cluster", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1522, - serialized_end=1596, -) - - -_LISTCLUSTERSREQUEST = _descriptor.Descriptor( - name="ListClustersRequest", - full_name="google.bigtable.admin.v2.ListClustersRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.ListClustersRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - 
has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Instance', - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.bigtable.admin.v2.ListClustersRequest.page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1598, - serialized_end=1697, -) - - -_LISTCLUSTERSRESPONSE = _descriptor.Descriptor( - name="ListClustersResponse", - full_name="google.bigtable.admin.v2.ListClustersResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="clusters", - full_name="google.bigtable.admin.v2.ListClustersResponse.clusters", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="failed_locations", - full_name="google.bigtable.admin.v2.ListClustersResponse.failed_locations", - index=1, - number=2, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - 
is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.bigtable.admin.v2.ListClustersResponse.next_page_token", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1699, - serialized_end=1825, -) - - -_DELETECLUSTERREQUEST = _descriptor.Descriptor( - name="DeleteClusterRequest", - full_name="google.bigtable.admin.v2.DeleteClusterRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.DeleteClusterRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A!\n\037bigtable.googleapis.com/Cluster", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1827, - serialized_end=1904, -) - - -_CREATEINSTANCEMETADATA = _descriptor.Descriptor( - name="CreateInstanceMetadata", - full_name="google.bigtable.admin.v2.CreateInstanceMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, 
- create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="original_request", - full_name="google.bigtable.admin.v2.CreateInstanceMetadata.original_request", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="request_time", - full_name="google.bigtable.admin.v2.CreateInstanceMetadata.request_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="finish_time", - full_name="google.bigtable.admin.v2.CreateInstanceMetadata.finish_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1907, - serialized_end=2105, -) - - -_UPDATEINSTANCEMETADATA = _descriptor.Descriptor( - name="UpdateInstanceMetadata", - full_name="google.bigtable.admin.v2.UpdateInstanceMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="original_request", - 
full_name="google.bigtable.admin.v2.UpdateInstanceMetadata.original_request", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="request_time", - full_name="google.bigtable.admin.v2.UpdateInstanceMetadata.request_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="finish_time", - full_name="google.bigtable.admin.v2.UpdateInstanceMetadata.finish_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2108, - serialized_end=2313, -) - - -_CREATECLUSTERMETADATA = _descriptor.Descriptor( - name="CreateClusterMetadata", - full_name="google.bigtable.admin.v2.CreateClusterMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="original_request", - full_name="google.bigtable.admin.v2.CreateClusterMetadata.original_request", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - 
default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="request_time", - full_name="google.bigtable.admin.v2.CreateClusterMetadata.request_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="finish_time", - full_name="google.bigtable.admin.v2.CreateClusterMetadata.finish_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2316, - serialized_end=2512, -) - - -_UPDATECLUSTERMETADATA = _descriptor.Descriptor( - name="UpdateClusterMetadata", - full_name="google.bigtable.admin.v2.UpdateClusterMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="original_request", - full_name="google.bigtable.admin.v2.UpdateClusterMetadata.original_request", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - 
create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="request_time", - full_name="google.bigtable.admin.v2.UpdateClusterMetadata.request_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="finish_time", - full_name="google.bigtable.admin.v2.UpdateClusterMetadata.finish_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2515, - serialized_end=2698, -) - - -_CREATEAPPPROFILEREQUEST = _descriptor.Descriptor( - name="CreateAppProfileRequest", - full_name="google.bigtable.admin.v2.CreateAppProfileRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.CreateAppProfileRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Instance', - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="app_profile_id", - 
full_name="google.bigtable.admin.v2.CreateAppProfileRequest.app_profile_id", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="app_profile", - full_name="google.bigtable.admin.v2.CreateAppProfileRequest.app_profile", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="ignore_warnings", - full_name="google.bigtable.admin.v2.CreateAppProfileRequest.ignore_warnings", - index=3, - number=4, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2701, - serialized_end=2902, -) - - -_GETAPPPROFILEREQUEST = _descriptor.Descriptor( - name="GetAppProfileRequest", - full_name="google.bigtable.admin.v2.GetAppProfileRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.GetAppProfileRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - 
default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b'\340A\002\372A$\n"bigtable.googleapis.com/AppProfile', - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2904, - serialized_end=2984, -) - - -_LISTAPPPROFILESREQUEST = _descriptor.Descriptor( - name="ListAppProfilesRequest", - full_name="google.bigtable.admin.v2.ListAppProfilesRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.ListAppProfilesRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Instance', - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.bigtable.admin.v2.ListAppProfilesRequest.page_size", - index=1, - number=3, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.bigtable.admin.v2.ListAppProfilesRequest.page_token", - index=2, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - 
containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2986, - serialized_end=3107, -) - - -_LISTAPPPROFILESRESPONSE = _descriptor.Descriptor( - name="ListAppProfilesResponse", - full_name="google.bigtable.admin.v2.ListAppProfilesResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="app_profiles", - full_name="google.bigtable.admin.v2.ListAppProfilesResponse.app_profiles", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.bigtable.admin.v2.ListAppProfilesResponse.next_page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="failed_locations", - full_name="google.bigtable.admin.v2.ListAppProfilesResponse.failed_locations", - index=2, - number=3, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - 
create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3110, - serialized_end=3246, -) - - -_UPDATEAPPPROFILEREQUEST = _descriptor.Descriptor( - name="UpdateAppProfileRequest", - full_name="google.bigtable.admin.v2.UpdateAppProfileRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="app_profile", - full_name="google.bigtable.admin.v2.UpdateAppProfileRequest.app_profile", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="update_mask", - full_name="google.bigtable.admin.v2.UpdateAppProfileRequest.update_mask", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="ignore_warnings", - full_name="google.bigtable.admin.v2.UpdateAppProfileRequest.ignore_warnings", - index=2, - number=3, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - 
is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3249, - serialized_end=3417, -) - - -_DELETEAPPPROFILEREQUEST = _descriptor.Descriptor( - name="DeleteAppProfileRequest", - full_name="google.bigtable.admin.v2.DeleteAppProfileRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.DeleteAppProfileRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b'\340A\002\372A$\n"bigtable.googleapis.com/AppProfile', - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="ignore_warnings", - full_name="google.bigtable.admin.v2.DeleteAppProfileRequest.ignore_warnings", - index=1, - number=2, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3419, - serialized_end=3532, -) - - -_UPDATEAPPPROFILEMETADATA = _descriptor.Descriptor( - name="UpdateAppProfileMetadata", - full_name="google.bigtable.admin.v2.UpdateAppProfileMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - 
oneofs=[], - serialized_start=3534, - serialized_end=3560, -) - -_CREATEINSTANCEREQUEST_CLUSTERSENTRY.fields_by_name[ - "value" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._CLUSTER -) -_CREATEINSTANCEREQUEST_CLUSTERSENTRY.containing_type = _CREATEINSTANCEREQUEST -_CREATEINSTANCEREQUEST.fields_by_name[ - "instance" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._INSTANCE -) -_CREATEINSTANCEREQUEST.fields_by_name[ - "clusters" -].message_type = _CREATEINSTANCEREQUEST_CLUSTERSENTRY -_LISTINSTANCESRESPONSE.fields_by_name[ - "instances" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._INSTANCE -) -_PARTIALUPDATEINSTANCEREQUEST.fields_by_name[ - "instance" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._INSTANCE -) -_PARTIALUPDATEINSTANCEREQUEST.fields_by_name[ - "update_mask" -].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK -_CREATECLUSTERREQUEST.fields_by_name[ - "cluster" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._CLUSTER -) -_LISTCLUSTERSRESPONSE.fields_by_name[ - "clusters" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._CLUSTER -) -_CREATEINSTANCEMETADATA.fields_by_name[ - "original_request" -].message_type = _CREATEINSTANCEREQUEST -_CREATEINSTANCEMETADATA.fields_by_name[ - "request_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CREATEINSTANCEMETADATA.fields_by_name[ - "finish_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_UPDATEINSTANCEMETADATA.fields_by_name[ - "original_request" -].message_type = _PARTIALUPDATEINSTANCEREQUEST -_UPDATEINSTANCEMETADATA.fields_by_name[ - "request_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_UPDATEINSTANCEMETADATA.fields_by_name[ - "finish_time" 
-].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CREATECLUSTERMETADATA.fields_by_name[ - "original_request" -].message_type = _CREATECLUSTERREQUEST -_CREATECLUSTERMETADATA.fields_by_name[ - "request_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CREATECLUSTERMETADATA.fields_by_name[ - "finish_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_UPDATECLUSTERMETADATA.fields_by_name[ - "original_request" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._CLUSTER -) -_UPDATECLUSTERMETADATA.fields_by_name[ - "request_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_UPDATECLUSTERMETADATA.fields_by_name[ - "finish_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CREATEAPPPROFILEREQUEST.fields_by_name[ - "app_profile" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._APPPROFILE -) -_LISTAPPPROFILESRESPONSE.fields_by_name[ - "app_profiles" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._APPPROFILE -) -_UPDATEAPPPROFILEREQUEST.fields_by_name[ - "app_profile" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._APPPROFILE -) -_UPDATEAPPPROFILEREQUEST.fields_by_name[ - "update_mask" -].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK -DESCRIPTOR.message_types_by_name["CreateInstanceRequest"] = _CREATEINSTANCEREQUEST -DESCRIPTOR.message_types_by_name["GetInstanceRequest"] = _GETINSTANCEREQUEST -DESCRIPTOR.message_types_by_name["ListInstancesRequest"] = _LISTINSTANCESREQUEST -DESCRIPTOR.message_types_by_name["ListInstancesResponse"] = _LISTINSTANCESRESPONSE -DESCRIPTOR.message_types_by_name[ - "PartialUpdateInstanceRequest" -] = _PARTIALUPDATEINSTANCEREQUEST -DESCRIPTOR.message_types_by_name["DeleteInstanceRequest"] = _DELETEINSTANCEREQUEST 
-DESCRIPTOR.message_types_by_name["CreateClusterRequest"] = _CREATECLUSTERREQUEST -DESCRIPTOR.message_types_by_name["GetClusterRequest"] = _GETCLUSTERREQUEST -DESCRIPTOR.message_types_by_name["ListClustersRequest"] = _LISTCLUSTERSREQUEST -DESCRIPTOR.message_types_by_name["ListClustersResponse"] = _LISTCLUSTERSRESPONSE -DESCRIPTOR.message_types_by_name["DeleteClusterRequest"] = _DELETECLUSTERREQUEST -DESCRIPTOR.message_types_by_name["CreateInstanceMetadata"] = _CREATEINSTANCEMETADATA -DESCRIPTOR.message_types_by_name["UpdateInstanceMetadata"] = _UPDATEINSTANCEMETADATA -DESCRIPTOR.message_types_by_name["CreateClusterMetadata"] = _CREATECLUSTERMETADATA -DESCRIPTOR.message_types_by_name["UpdateClusterMetadata"] = _UPDATECLUSTERMETADATA -DESCRIPTOR.message_types_by_name["CreateAppProfileRequest"] = _CREATEAPPPROFILEREQUEST -DESCRIPTOR.message_types_by_name["GetAppProfileRequest"] = _GETAPPPROFILEREQUEST -DESCRIPTOR.message_types_by_name["ListAppProfilesRequest"] = _LISTAPPPROFILESREQUEST -DESCRIPTOR.message_types_by_name["ListAppProfilesResponse"] = _LISTAPPPROFILESRESPONSE -DESCRIPTOR.message_types_by_name["UpdateAppProfileRequest"] = _UPDATEAPPPROFILEREQUEST -DESCRIPTOR.message_types_by_name["DeleteAppProfileRequest"] = _DELETEAPPPROFILEREQUEST -DESCRIPTOR.message_types_by_name["UpdateAppProfileMetadata"] = _UPDATEAPPPROFILEMETADATA -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -CreateInstanceRequest = _reflection.GeneratedProtocolMessageType( - "CreateInstanceRequest", - (_message.Message,), - { - "ClustersEntry": _reflection.GeneratedProtocolMessageType( - "ClustersEntry", - (_message.Message,), - { - "DESCRIPTOR": _CREATEINSTANCEREQUEST_CLUSTERSENTRY, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2" - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry) - }, - ), - "DESCRIPTOR": _CREATEINSTANCEREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", 
- "__doc__": """Request message for BigtableInstanceAdmin.CreateInstance. - - Attributes: - parent: - Required. The unique name of the project in which to create - the new instance. Values are of the form - ``projects/{project}``. - instance_id: - Required. The ID to be used when referring to the new instance - within its project, e.g., just ``myinstance`` rather than - ``projects/myproject/instances/myinstance``. - instance: - Required. The instance to create. Fields marked ``OutputOnly`` - must be left blank. - clusters: - Required. The clusters to be created within the instance, - mapped by desired cluster ID, e.g., just ``mycluster`` rather - than ``projects/myproject/instances/myinstance/clusters/myclus - ter``. Fields marked ``OutputOnly`` must be left blank. - Currently, at most four clusters can be specified. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateInstanceRequest) - }, -) -_sym_db.RegisterMessage(CreateInstanceRequest) -_sym_db.RegisterMessage(CreateInstanceRequest.ClustersEntry) - -GetInstanceRequest = _reflection.GeneratedProtocolMessageType( - "GetInstanceRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETINSTANCEREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Request message for BigtableInstanceAdmin.GetInstance. - - Attributes: - name: - Required. The unique name of the requested instance. Values - are of the form ``projects/{project}/instances/{instance}``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetInstanceRequest) - }, -) -_sym_db.RegisterMessage(GetInstanceRequest) - -ListInstancesRequest = _reflection.GeneratedProtocolMessageType( - "ListInstancesRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTINSTANCESREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Request message for BigtableInstanceAdmin.ListInstances. 
- - Attributes: - parent: - Required. The unique name of the project for which a list of - instances is requested. Values are of the form - ``projects/{project}``. - page_token: - DEPRECATED: This field is unused and ignored. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListInstancesRequest) - }, -) -_sym_db.RegisterMessage(ListInstancesRequest) - -ListInstancesResponse = _reflection.GeneratedProtocolMessageType( - "ListInstancesResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTINSTANCESRESPONSE, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Response message for BigtableInstanceAdmin.ListInstances. - - Attributes: - instances: - The list of requested instances. - failed_locations: - Locations from which Instance information could not be - retrieved, due to an outage or some other transient condition. - Instances whose Clusters are all in one of the failed - locations may be missing from ``instances``, and Instances - with at least one Cluster in a failed location may only have - partial information returned. Values are of the form - ``projects//locations/`` - next_page_token: - DEPRECATED: This field is unused and ignored. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListInstancesResponse) - }, -) -_sym_db.RegisterMessage(ListInstancesResponse) - -PartialUpdateInstanceRequest = _reflection.GeneratedProtocolMessageType( - "PartialUpdateInstanceRequest", - (_message.Message,), - { - "DESCRIPTOR": _PARTIALUPDATEINSTANCEREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Request message for BigtableInstanceAdmin.PartialUpdateInstance. - - Attributes: - instance: - Required. The Instance which will (partially) replace the - current value. - update_mask: - Required. The subset of Instance fields which should be - replaced. Must be explicitly set. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.PartialUpdateInstanceRequest) - }, -) -_sym_db.RegisterMessage(PartialUpdateInstanceRequest) - -DeleteInstanceRequest = _reflection.GeneratedProtocolMessageType( - "DeleteInstanceRequest", - (_message.Message,), - { - "DESCRIPTOR": _DELETEINSTANCEREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Request message for BigtableInstanceAdmin.DeleteInstance. - - Attributes: - name: - Required. The unique name of the instance to be deleted. - Values are of the form - ``projects/{project}/instances/{instance}``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteInstanceRequest) - }, -) -_sym_db.RegisterMessage(DeleteInstanceRequest) - -CreateClusterRequest = _reflection.GeneratedProtocolMessageType( - "CreateClusterRequest", - (_message.Message,), - { - "DESCRIPTOR": _CREATECLUSTERREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Request message for BigtableInstanceAdmin.CreateCluster. - - Attributes: - parent: - Required. The unique name of the instance in which to create - the new cluster. Values are of the form - ``projects/{project}/instances/{instance}``. - cluster_id: - Required. The ID to be used when referring to the new cluster - within its instance, e.g., just ``mycluster`` rather than ``pr - ojects/myproject/instances/myinstance/clusters/mycluster``. - cluster: - Required. The cluster to be created. Fields marked - ``OutputOnly`` must be left blank. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateClusterRequest) - }, -) -_sym_db.RegisterMessage(CreateClusterRequest) - -GetClusterRequest = _reflection.GeneratedProtocolMessageType( - "GetClusterRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETCLUSTERREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Request message for BigtableInstanceAdmin.GetCluster. - - Attributes: - name: - Required. The unique name of the requested cluster. Values are - of the form ``projects/{project}/instances/{instance}/clusters - /{cluster}``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetClusterRequest) - }, -) -_sym_db.RegisterMessage(GetClusterRequest) - -ListClustersRequest = _reflection.GeneratedProtocolMessageType( - "ListClustersRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTCLUSTERSREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Request message for BigtableInstanceAdmin.ListClusters. - - Attributes: - parent: - Required. The unique name of the instance for which a list of - clusters is requested. Values are of the form - ``projects/{project}/instances/{instance}``. Use ``{instance} - = '-'`` to list Clusters for all Instances in a project, e.g., - ``projects/myproject/instances/-``. - page_token: - DEPRECATED: This field is unused and ignored. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListClustersRequest) - }, -) -_sym_db.RegisterMessage(ListClustersRequest) - -ListClustersResponse = _reflection.GeneratedProtocolMessageType( - "ListClustersResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTCLUSTERSRESPONSE, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Response message for BigtableInstanceAdmin.ListClusters. - - Attributes: - clusters: - The list of requested clusters. 
- failed_locations: - Locations from which Cluster information could not be - retrieved, due to an outage or some other transient condition. - Clusters from these locations may be missing from - ``clusters``, or may only have partial information returned. - Values are of the form - ``projects//locations/`` - next_page_token: - DEPRECATED: This field is unused and ignored. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListClustersResponse) - }, -) -_sym_db.RegisterMessage(ListClustersResponse) - -DeleteClusterRequest = _reflection.GeneratedProtocolMessageType( - "DeleteClusterRequest", - (_message.Message,), - { - "DESCRIPTOR": _DELETECLUSTERREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Request message for BigtableInstanceAdmin.DeleteCluster. - - Attributes: - name: - Required. The unique name of the cluster to be deleted. Values - are of the form ``projects/{project}/instances/{instance}/clus - ters/{cluster}``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteClusterRequest) - }, -) -_sym_db.RegisterMessage(DeleteClusterRequest) - -CreateInstanceMetadata = _reflection.GeneratedProtocolMessageType( - "CreateInstanceMetadata", - (_message.Message,), - { - "DESCRIPTOR": _CREATEINSTANCEMETADATA, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """The metadata for the Operation returned by CreateInstance. - - Attributes: - original_request: - The request that prompted the initiation of this - CreateInstance operation. - request_time: - The time at which the original request was received. - finish_time: - The time at which the operation failed or was completed - successfully. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateInstanceMetadata) - }, -) -_sym_db.RegisterMessage(CreateInstanceMetadata) - -UpdateInstanceMetadata = _reflection.GeneratedProtocolMessageType( - "UpdateInstanceMetadata", - (_message.Message,), - { - "DESCRIPTOR": _UPDATEINSTANCEMETADATA, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """The metadata for the Operation returned by UpdateInstance. - - Attributes: - original_request: - The request that prompted the initiation of this - UpdateInstance operation. - request_time: - The time at which the original request was received. - finish_time: - The time at which the operation failed or was completed - successfully. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.UpdateInstanceMetadata) - }, -) -_sym_db.RegisterMessage(UpdateInstanceMetadata) - -CreateClusterMetadata = _reflection.GeneratedProtocolMessageType( - "CreateClusterMetadata", - (_message.Message,), - { - "DESCRIPTOR": _CREATECLUSTERMETADATA, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """The metadata for the Operation returned by CreateCluster. - - Attributes: - original_request: - The request that prompted the initiation of this CreateCluster - operation. - request_time: - The time at which the original request was received. - finish_time: - The time at which the operation failed or was completed - successfully. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateClusterMetadata) - }, -) -_sym_db.RegisterMessage(CreateClusterMetadata) - -UpdateClusterMetadata = _reflection.GeneratedProtocolMessageType( - "UpdateClusterMetadata", - (_message.Message,), - { - "DESCRIPTOR": _UPDATECLUSTERMETADATA, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """The metadata for the Operation returned by UpdateCluster. 
- - Attributes: - original_request: - The request that prompted the initiation of this UpdateCluster - operation. - request_time: - The time at which the original request was received. - finish_time: - The time at which the operation failed or was completed - successfully. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.UpdateClusterMetadata) - }, -) -_sym_db.RegisterMessage(UpdateClusterMetadata) - -CreateAppProfileRequest = _reflection.GeneratedProtocolMessageType( - "CreateAppProfileRequest", - (_message.Message,), - { - "DESCRIPTOR": _CREATEAPPPROFILEREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Request message for BigtableInstanceAdmin.CreateAppProfile. - - Attributes: - parent: - Required. The unique name of the instance in which to create - the new app profile. Values are of the form - ``projects/{project}/instances/{instance}``. - app_profile_id: - Required. The ID to be used when referring to the new app - profile within its instance, e.g., just ``myprofile`` rather - than ``projects/myproject/instances/myinstance/appProfiles/myp - rofile``. - app_profile: - Required. The app profile to be created. Fields marked - ``OutputOnly`` will be ignored. - ignore_warnings: - If true, ignore safety checks when creating the app profile. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateAppProfileRequest) - }, -) -_sym_db.RegisterMessage(CreateAppProfileRequest) - -GetAppProfileRequest = _reflection.GeneratedProtocolMessageType( - "GetAppProfileRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETAPPPROFILEREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Request message for BigtableInstanceAdmin.GetAppProfile. - - Attributes: - name: - Required. The unique name of the requested app profile. Values - are of the form ``projects/{project}/instances/{instance}/appP - rofiles/{app_profile}``. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetAppProfileRequest) - }, -) -_sym_db.RegisterMessage(GetAppProfileRequest) - -ListAppProfilesRequest = _reflection.GeneratedProtocolMessageType( - "ListAppProfilesRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTAPPPROFILESREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Request message for BigtableInstanceAdmin.ListAppProfiles. - - Attributes: - parent: - Required. The unique name of the instance for which a list of - app profiles is requested. Values are of the form - ``projects/{project}/instances/{instance}``. Use ``{instance} - = '-'`` to list AppProfiles for all Instances in a project, - e.g., ``projects/myproject/instances/-``. - page_size: - Maximum number of results per page. A page_size of zero lets - the server choose the number of items to return. A page_size - which is strictly positive will return at most that many - items. A negative page_size will cause an error. Following - the first request, subsequent paginated calls are not required - to pass a page_size. If a page_size is set in subsequent - calls, it must match the page_size given in the first request. - page_token: - The value of ``next_page_token`` returned by a previous call. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListAppProfilesRequest) - }, -) -_sym_db.RegisterMessage(ListAppProfilesRequest) - -ListAppProfilesResponse = _reflection.GeneratedProtocolMessageType( - "ListAppProfilesResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTAPPPROFILESRESPONSE, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Response message for BigtableInstanceAdmin.ListAppProfiles. - - Attributes: - app_profiles: - The list of requested app profiles. - next_page_token: - Set if not all app profiles could be returned in a single - response. 
Pass this value to ``page_token`` in another request - to get the next page of results. - failed_locations: - Locations from which AppProfile information could not be - retrieved, due to an outage or some other transient condition. - AppProfiles from these locations may be missing from - ``app_profiles``. Values are of the form - ``projects//locations/`` - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListAppProfilesResponse) - }, -) -_sym_db.RegisterMessage(ListAppProfilesResponse) - -UpdateAppProfileRequest = _reflection.GeneratedProtocolMessageType( - "UpdateAppProfileRequest", - (_message.Message,), - { - "DESCRIPTOR": _UPDATEAPPPROFILEREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Request message for BigtableInstanceAdmin.UpdateAppProfile. - - Attributes: - app_profile: - Required. The app profile which will (partially) replace the - current value. - update_mask: - Required. The subset of app profile fields which should be - replaced. If unset, all fields will be replaced. - ignore_warnings: - If true, ignore safety checks when updating the app profile. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.UpdateAppProfileRequest) - }, -) -_sym_db.RegisterMessage(UpdateAppProfileRequest) - -DeleteAppProfileRequest = _reflection.GeneratedProtocolMessageType( - "DeleteAppProfileRequest", - (_message.Message,), - { - "DESCRIPTOR": _DELETEAPPPROFILEREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Request message for BigtableInstanceAdmin.DeleteAppProfile. - - Attributes: - name: - Required. The unique name of the app profile to be deleted. - Values are of the form ``projects/{project}/instances/{instanc - e}/appProfiles/{app_profile}``. - ignore_warnings: - Required. If true, ignore safety checks when deleting the app - profile. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteAppProfileRequest) - }, -) -_sym_db.RegisterMessage(DeleteAppProfileRequest) - -UpdateAppProfileMetadata = _reflection.GeneratedProtocolMessageType( - "UpdateAppProfileMetadata", - (_message.Message,), - { - "DESCRIPTOR": _UPDATEAPPPROFILEMETADATA, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """The metadata for the Operation returned by UpdateAppProfile.""", - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.UpdateAppProfileMetadata) - }, -) -_sym_db.RegisterMessage(UpdateAppProfileMetadata) - - -DESCRIPTOR._options = None -_CREATEINSTANCEREQUEST_CLUSTERSENTRY._options = None -_CREATEINSTANCEREQUEST.fields_by_name["parent"]._options = None -_CREATEINSTANCEREQUEST.fields_by_name["instance_id"]._options = None -_CREATEINSTANCEREQUEST.fields_by_name["instance"]._options = None -_CREATEINSTANCEREQUEST.fields_by_name["clusters"]._options = None -_GETINSTANCEREQUEST.fields_by_name["name"]._options = None -_LISTINSTANCESREQUEST.fields_by_name["parent"]._options = None -_PARTIALUPDATEINSTANCEREQUEST.fields_by_name["instance"]._options = None -_PARTIALUPDATEINSTANCEREQUEST.fields_by_name["update_mask"]._options = None -_DELETEINSTANCEREQUEST.fields_by_name["name"]._options = None -_CREATECLUSTERREQUEST.fields_by_name["parent"]._options = None -_CREATECLUSTERREQUEST.fields_by_name["cluster_id"]._options = None -_CREATECLUSTERREQUEST.fields_by_name["cluster"]._options = None -_GETCLUSTERREQUEST.fields_by_name["name"]._options = None -_LISTCLUSTERSREQUEST.fields_by_name["parent"]._options = None -_DELETECLUSTERREQUEST.fields_by_name["name"]._options = None -_CREATEAPPPROFILEREQUEST.fields_by_name["parent"]._options = None -_CREATEAPPPROFILEREQUEST.fields_by_name["app_profile_id"]._options = None -_CREATEAPPPROFILEREQUEST.fields_by_name["app_profile"]._options = None -_GETAPPPROFILEREQUEST.fields_by_name["name"]._options = None 
-_LISTAPPPROFILESREQUEST.fields_by_name["parent"]._options = None -_UPDATEAPPPROFILEREQUEST.fields_by_name["app_profile"]._options = None -_UPDATEAPPPROFILEREQUEST.fields_by_name["update_mask"]._options = None -_DELETEAPPPROFILEREQUEST.fields_by_name["name"]._options = None -_DELETEAPPPROFILEREQUEST.fields_by_name["ignore_warnings"]._options = None - -_BIGTABLEINSTANCEADMIN = _descriptor.ServiceDescriptor( - name="BigtableInstanceAdmin", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin", - file=DESCRIPTOR, - index=0, - serialized_options=b"\312A\034bigtableadmin.googleapis.com\322A\367\002https://www.googleapis.com/auth/bigtable.admin,https://www.googleapis.com/auth/bigtable.admin.cluster,https://www.googleapis.com/auth/bigtable.admin.instance,https://www.googleapis.com/auth/cloud-bigtable.admin,https://www.googleapis.com/auth/cloud-bigtable.admin.cluster,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-only", - create_key=_descriptor._internal_create_key, - serialized_start=3563, - serialized_end=7421, - methods=[ - _descriptor.MethodDescriptor( - name="CreateInstance", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.CreateInstance", - index=0, - containing_service=None, - input_type=_CREATEINSTANCEREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b'\202\323\344\223\002&"!/v2/{parent=projects/*}/instances:\001*\332A$parent,instance_id,instance,clusters\312A"\n\010Instance\022\026CreateInstanceMetadata', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GetInstance", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.GetInstance", - index=1, - containing_service=None, - input_type=_GETINSTANCEREQUEST, - output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._INSTANCE, - serialized_options=b"\202\323\344\223\002#\022!/v2/{name=projects/*/instances/*}\332A\004name", - 
create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="ListInstances", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.ListInstances", - index=2, - containing_service=None, - input_type=_LISTINSTANCESREQUEST, - output_type=_LISTINSTANCESRESPONSE, - serialized_options=b"\202\323\344\223\002#\022!/v2/{parent=projects/*}/instances\332A\006parent", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="UpdateInstance", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateInstance", - index=3, - containing_service=None, - input_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._INSTANCE, - output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._INSTANCE, - serialized_options=b"\202\323\344\223\002&\032!/v2/{name=projects/*/instances/*}:\001*", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="PartialUpdateInstance", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.PartialUpdateInstance", - index=4, - containing_service=None, - input_type=_PARTIALUPDATEINSTANCEREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b'\202\323\344\223\00262*/v2/{instance.name=projects/*/instances/*}:\010instance\332A\024instance,update_mask\312A"\n\010Instance\022\026UpdateInstanceMetadata', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="DeleteInstance", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteInstance", - index=5, - containing_service=None, - input_type=_DELETEINSTANCEREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=b"\202\323\344\223\002#*!/v2/{name=projects/*/instances/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="CreateCluster", - 
full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.CreateCluster", - index=6, - containing_service=None, - input_type=_CREATECLUSTERREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b'\202\323\344\223\0027",/v2/{parent=projects/*/instances/*}/clusters:\007cluster\332A\031parent,cluster_id,cluster\312A \n\007Cluster\022\025CreateClusterMetadata', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GetCluster", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.GetCluster", - index=7, - containing_service=None, - input_type=_GETCLUSTERREQUEST, - output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._CLUSTER, - serialized_options=b"\202\323\344\223\002.\022,/v2/{name=projects/*/instances/*/clusters/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="ListClusters", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.ListClusters", - index=8, - containing_service=None, - input_type=_LISTCLUSTERSREQUEST, - output_type=_LISTCLUSTERSRESPONSE, - serialized_options=b"\202\323\344\223\002.\022,/v2/{parent=projects/*/instances/*}/clusters\332A\006parent", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="UpdateCluster", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateCluster", - index=9, - containing_service=None, - input_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._CLUSTER, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b"\202\323\344\223\0021\032,/v2/{name=projects/*/instances/*/clusters/*}:\001*\312A \n\007Cluster\022\025UpdateClusterMetadata", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="DeleteCluster", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteCluster", - index=10, - 
containing_service=None, - input_type=_DELETECLUSTERREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=b"\202\323\344\223\002.*,/v2/{name=projects/*/instances/*/clusters/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="CreateAppProfile", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.CreateAppProfile", - index=11, - containing_service=None, - input_type=_CREATEAPPPROFILEREQUEST, - output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._APPPROFILE, - serialized_options=b'\202\323\344\223\002>"//v2/{parent=projects/*/instances/*}/appProfiles:\013app_profile\332A!parent,app_profile_id,app_profile', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GetAppProfile", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.GetAppProfile", - index=12, - containing_service=None, - input_type=_GETAPPPROFILEREQUEST, - output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._APPPROFILE, - serialized_options=b"\202\323\344\223\0021\022//v2/{name=projects/*/instances/*/appProfiles/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="ListAppProfiles", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.ListAppProfiles", - index=13, - containing_service=None, - input_type=_LISTAPPPROFILESREQUEST, - output_type=_LISTAPPPROFILESRESPONSE, - serialized_options=b"\202\323\344\223\0021\022//v2/{parent=projects/*/instances/*}/appProfiles\332A\006parent", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="UpdateAppProfile", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateAppProfile", - index=14, - containing_service=None, - input_type=_UPDATEAPPPROFILEREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - 
serialized_options=b"\202\323\344\223\002J2;/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}:\013app_profile\332A\027app_profile,update_mask\312A&\n\nAppProfile\022\030UpdateAppProfileMetadata", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="DeleteAppProfile", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteAppProfile", - index=15, - containing_service=None, - input_type=_DELETEAPPPROFILEREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=b"\202\323\344\223\0021*//v2/{name=projects/*/instances/*/appProfiles/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GetIamPolicy", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.GetIamPolicy", - index=16, - containing_service=None, - input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._GETIAMPOLICYREQUEST, - output_type=google_dot_iam_dot_v1_dot_policy__pb2._POLICY, - serialized_options=b'\202\323\344\223\0027"2/v2/{resource=projects/*/instances/*}:getIamPolicy:\001*\332A\010resource', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="SetIamPolicy", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.SetIamPolicy", - index=17, - containing_service=None, - input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._SETIAMPOLICYREQUEST, - output_type=google_dot_iam_dot_v1_dot_policy__pb2._POLICY, - serialized_options=b'\202\323\344\223\0027"2/v2/{resource=projects/*/instances/*}:setIamPolicy:\001*\332A\017resource,policy', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="TestIamPermissions", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.TestIamPermissions", - index=18, - containing_service=None, - input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._TESTIAMPERMISSIONSREQUEST, - 
output_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._TESTIAMPERMISSIONSRESPONSE, - serialized_options=b'\202\323\344\223\002="8/v2/{resource=projects/*/instances/*}:testIamPermissions:\001*\332A\024resource,permissions', - create_key=_descriptor._internal_create_key, - ), - ], -) -_sym_db.RegisterServiceDescriptor(_BIGTABLEINSTANCEADMIN) - -DESCRIPTOR.services_by_name["BigtableInstanceAdmin"] = _BIGTABLEINSTANCEADMIN - -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py b/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py deleted file mode 100644 index 8b736d31d..000000000 --- a/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py +++ /dev/null @@ -1,933 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc - -from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2, -) -from google.cloud.bigtable_admin_v2.proto import ( - instance_pb2 as google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2, -) -from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2 -from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2 -from google.longrunning import ( - operations_pb2 as google_dot_longrunning_dot_operations__pb2, -) -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 - - -class BigtableInstanceAdminStub(object): - """Service for creating, configuring, and deleting Cloud Bigtable Instances and - Clusters. Provides access to the Instance and Cluster schemas only, not the - tables' metadata or data stored in those tables. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. 
- """ - self.CreateInstance = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateInstance", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateInstanceRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.GetInstance = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetInstance", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetInstanceRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Instance.FromString, - ) - self.ListInstances = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListInstances", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesResponse.FromString, - ) - self.UpdateInstance = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateInstance", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Instance.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Instance.FromString, - ) - self.PartialUpdateInstance = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateInstance", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.PartialUpdateInstanceRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.DeleteInstance = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteInstance", - 
request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteInstanceRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - self.CreateCluster = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateCluster", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateClusterRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.GetCluster = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetCluster", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetClusterRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Cluster.FromString, - ) - self.ListClusters = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListClusters", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersResponse.FromString, - ) - self.UpdateCluster = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateCluster", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Cluster.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.DeleteCluster = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteCluster", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteClusterRequest.SerializeToString, - 
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - self.CreateAppProfile = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateAppProfile", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateAppProfileRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.AppProfile.FromString, - ) - self.GetAppProfile = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetAppProfile", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetAppProfileRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.AppProfile.FromString, - ) - self.ListAppProfiles = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListAppProfiles", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesResponse.FromString, - ) - self.UpdateAppProfile = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateAppProfile", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.UpdateAppProfileRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.DeleteAppProfile = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteAppProfile", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteAppProfileRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - self.GetIamPolicy = 
channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetIamPolicy", - request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.SerializeToString, - response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, - ) - self.SetIamPolicy = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/SetIamPolicy", - request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.SerializeToString, - response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, - ) - self.TestIamPermissions = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/TestIamPermissions", - request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.SerializeToString, - response_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString, - ) - - -class BigtableInstanceAdminServicer(object): - """Service for creating, configuring, and deleting Cloud Bigtable Instances and - Clusters. Provides access to the Instance and Cluster schemas only, not the - tables' metadata or data stored in those tables. - """ - - def CreateInstance(self, request, context): - """Create an instance within a project. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetInstance(self, request, context): - """Gets information about an instance. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListInstances(self, request, context): - """Lists information about instances in a project. 
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def UpdateInstance(self, request, context): - """Updates an instance within a project. This method updates only the display - name and type for an Instance. To update other Instance properties, such as - labels, use PartialUpdateInstance. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def PartialUpdateInstance(self, request, context): - """Partially updates an instance within a project. This method can modify all - fields of an Instance and is the preferred way to update an Instance. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteInstance(self, request, context): - """Delete an instance from a project. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def CreateCluster(self, request, context): - """Creates a cluster within an instance. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetCluster(self, request, context): - """Gets information about a cluster. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListClusters(self, request, context): - """Lists information about clusters in an instance. 
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def UpdateCluster(self, request, context): - """Updates a cluster within an instance. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteCluster(self, request, context): - """Deletes a cluster from an instance. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def CreateAppProfile(self, request, context): - """Creates an app profile within an instance. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetAppProfile(self, request, context): - """Gets information about an app profile. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListAppProfiles(self, request, context): - """Lists information about app profiles in an instance. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def UpdateAppProfile(self, request, context): - """Updates an app profile within an instance. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteAppProfile(self, request, context): - """Deletes an app profile from an instance. 
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetIamPolicy(self, request, context): - """Gets the access control policy for an instance resource. Returns an empty - policy if an instance exists but does not have a policy set. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def SetIamPolicy(self, request, context): - """Sets the access control policy on an instance resource. Replaces any - existing policy. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def TestIamPermissions(self, request, context): - """Returns permissions that the caller has on the specified instance resource. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - -def add_BigtableInstanceAdminServicer_to_server(servicer, server): - rpc_method_handlers = { - "CreateInstance": grpc.unary_unary_rpc_method_handler( - servicer.CreateInstance, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateInstanceRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "GetInstance": grpc.unary_unary_rpc_method_handler( - servicer.GetInstance, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetInstanceRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Instance.SerializeToString, - ), - "ListInstances": grpc.unary_unary_rpc_method_handler( - servicer.ListInstances, - 
request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesResponse.SerializeToString, - ), - "UpdateInstance": grpc.unary_unary_rpc_method_handler( - servicer.UpdateInstance, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Instance.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Instance.SerializeToString, - ), - "PartialUpdateInstance": grpc.unary_unary_rpc_method_handler( - servicer.PartialUpdateInstance, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.PartialUpdateInstanceRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "DeleteInstance": grpc.unary_unary_rpc_method_handler( - servicer.DeleteInstance, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteInstanceRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - "CreateCluster": grpc.unary_unary_rpc_method_handler( - servicer.CreateCluster, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateClusterRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "GetCluster": grpc.unary_unary_rpc_method_handler( - servicer.GetCluster, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetClusterRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Cluster.SerializeToString, - ), - "ListClusters": grpc.unary_unary_rpc_method_handler( - 
servicer.ListClusters, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersResponse.SerializeToString, - ), - "UpdateCluster": grpc.unary_unary_rpc_method_handler( - servicer.UpdateCluster, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Cluster.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "DeleteCluster": grpc.unary_unary_rpc_method_handler( - servicer.DeleteCluster, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteClusterRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - "CreateAppProfile": grpc.unary_unary_rpc_method_handler( - servicer.CreateAppProfile, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateAppProfileRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.AppProfile.SerializeToString, - ), - "GetAppProfile": grpc.unary_unary_rpc_method_handler( - servicer.GetAppProfile, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetAppProfileRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.AppProfile.SerializeToString, - ), - "ListAppProfiles": grpc.unary_unary_rpc_method_handler( - servicer.ListAppProfiles, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesRequest.FromString, - 
response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesResponse.SerializeToString, - ), - "UpdateAppProfile": grpc.unary_unary_rpc_method_handler( - servicer.UpdateAppProfile, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.UpdateAppProfileRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "DeleteAppProfile": grpc.unary_unary_rpc_method_handler( - servicer.DeleteAppProfile, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteAppProfileRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - "GetIamPolicy": grpc.unary_unary_rpc_method_handler( - servicer.GetIamPolicy, - request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.FromString, - response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString, - ), - "SetIamPolicy": grpc.unary_unary_rpc_method_handler( - servicer.SetIamPolicy, - request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.FromString, - response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString, - ), - "TestIamPermissions": grpc.unary_unary_rpc_method_handler( - servicer.TestIamPermissions, - request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.FromString, - response_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - "google.bigtable.admin.v2.BigtableInstanceAdmin", rpc_method_handlers - ) - server.add_generic_rpc_handlers((generic_handler,)) - - -# This class is part of an EXPERIMENTAL API. 
-class BigtableInstanceAdmin(object): - """Service for creating, configuring, and deleting Cloud Bigtable Instances and - Clusters. Provides access to the Instance and Cluster schemas only, not the - tables' metadata or data stored in those tables. - """ - - @staticmethod - def CreateInstance( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateInstance", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateInstanceRequest.SerializeToString, - google_dot_longrunning_dot_operations__pb2.Operation.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def GetInstance( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetInstance", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetInstanceRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Instance.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def ListInstances( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListInstances", - 
google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def UpdateInstance( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateInstance", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Instance.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Instance.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def PartialUpdateInstance( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateInstance", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.PartialUpdateInstanceRequest.SerializeToString, - google_dot_longrunning_dot_operations__pb2.Operation.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def DeleteInstance( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return 
grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteInstance", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteInstanceRequest.SerializeToString, - google_dot_protobuf_dot_empty__pb2.Empty.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def CreateCluster( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateCluster", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateClusterRequest.SerializeToString, - google_dot_longrunning_dot_operations__pb2.Operation.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def GetCluster( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetCluster", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetClusterRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Cluster.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def ListClusters( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - 
metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListClusters", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def UpdateCluster( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateCluster", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Cluster.SerializeToString, - google_dot_longrunning_dot_operations__pb2.Operation.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def DeleteCluster( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteCluster", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteClusterRequest.SerializeToString, - google_dot_protobuf_dot_empty__pb2.Empty.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def CreateAppProfile( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - 
wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateAppProfile", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateAppProfileRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.AppProfile.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def GetAppProfile( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetAppProfile", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetAppProfileRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.AppProfile.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def ListAppProfiles( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListAppProfiles", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod 
- def UpdateAppProfile( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateAppProfile", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.UpdateAppProfileRequest.SerializeToString, - google_dot_longrunning_dot_operations__pb2.Operation.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def DeleteAppProfile( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteAppProfile", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteAppProfileRequest.SerializeToString, - google_dot_protobuf_dot_empty__pb2.Empty.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def GetIamPolicy( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetIamPolicy", - google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.SerializeToString, - google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def SetIamPolicy( - 
request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/SetIamPolicy", - google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.SerializeToString, - google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def TestIamPermissions( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/TestIamPermissions", - google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.SerializeToString, - google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) diff --git a/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py b/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py deleted file mode 100644 index 2e2b6f7d9..000000000 --- a/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py +++ /dev/null @@ -1,3577 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto -"""Generated protocol buffer code.""" -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.api import client_pb2 as google_dot_api_dot_client__pb2 -from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.cloud.bigtable_admin_v2.proto import ( - common_pb2 as google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_common__pb2, -) -from google.cloud.bigtable_admin_v2.proto import ( - table_pb2 as google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2, -) -from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2 -from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2 -from google.longrunning import ( - operations_pb2 as google_dot_longrunning_dot_operations__pb2, -) -from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -from google.protobuf import field_mask_pb2 as google_dot_protobuf_dot_field__mask__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto", - package="google.bigtable.admin.v2", - syntax="proto3", - 
serialized_options=b'\n\034com.google.bigtable.admin.v2B\027BigtableTableAdminProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2\352\002"Google::Cloud::Bigtable::Admin::V2', - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n?google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x31google/cloud/bigtable_admin_v2/proto/common.proto\x1a\x30google/cloud/bigtable_admin_v2/proto/table.proto\x1a\x1egoogle/iam/v1/iam_policy.proto\x1a\x1agoogle/iam/v1/policy.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xa7\x01\n\x13RestoreTableRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x15\n\x08table_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x35\n\x06\x62\x61\x63kup\x18\x03 \x01(\tB#\xfa\x41 \n\x1e\x62igtable.googleapis.com/BackupH\x00\x42\x08\n\x06source"\x98\x02\n\x14RestoreTableMetadata\x12\x0c\n\x04name\x18\x01 \x01(\t\x12@\n\x0bsource_type\x18\x02 \x01(\x0e\x32+.google.bigtable.admin.v2.RestoreSourceType\x12;\n\x0b\x62\x61\x63kup_info\x18\x03 \x01(\x0b\x32$.google.bigtable.admin.v2.BackupInfoH\x00\x12%\n\x1doptimize_table_operation_name\x18\x04 \x01(\t\x12=\n\x08progress\x18\x05 \x01(\x0b\x32+.google.bigtable.admin.v2.OperationProgressB\r\n\x0bsource_info"l\n\x1dOptimizeRestoredTableMetadata\x12\x0c\n\x04name\x18\x01 \x01(\t\x12=\n\x08progress\x18\x02 \x01(\x0b\x32+.google.bigtable.admin.v2.OperationProgress"\xfc\x01\n\x12\x43reateTableRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x15\n\x08table_id\x18\x02 
\x01(\tB\x03\xe0\x41\x02\x12\x33\n\x05table\x18\x03 \x01(\x0b\x32\x1f.google.bigtable.admin.v2.TableB\x03\xe0\x41\x02\x12J\n\x0einitial_splits\x18\x04 \x03(\x0b\x32\x32.google.bigtable.admin.v2.CreateTableRequest.Split\x1a\x14\n\x05Split\x12\x0b\n\x03key\x18\x01 \x01(\x0c"\xb4\x01\n\x1e\x43reateTableFromSnapshotRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x15\n\x08table_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x41\n\x0fsource_snapshot\x18\x03 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Snapshot"\x94\x01\n\x13\x44ropRowRangeRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x18\n\x0erow_key_prefix\x18\x02 \x01(\x0cH\x00\x12$\n\x1a\x64\x65lete_all_data_from_table\x18\x03 \x01(\x08H\x00\x42\x08\n\x06target"\xa8\x01\n\x11ListTablesRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t"^\n\x12ListTablesResponse\x12/\n\x06tables\x18\x01 \x03(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"z\n\x0fGetTableRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View"I\n\x12\x44\x65leteTableRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table"\xda\x02\n\x1bModifyColumnFamiliesRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12^\n\rmodifications\x18\x02 \x03(\x0b\x32\x42.google.bigtable.admin.v2.ModifyColumnFamiliesRequest.ModificationB\x03\xe0\x41\x02\x1a\xa5\x01\n\x0cModification\x12\n\n\x02id\x18\x01 \x01(\t\x12\x38\n\x06\x63reate\x18\x02 
\x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x38\n\x06update\x18\x03 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x0e\n\x04\x64rop\x18\x04 \x01(\x08H\x00\x42\x05\n\x03mod"V\n\x1fGenerateConsistencyTokenRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table"=\n GenerateConsistencyTokenResponse\x12\x19\n\x11\x63onsistency_token\x18\x01 \x01(\t"n\n\x17\x43heckConsistencyRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x1e\n\x11\x63onsistency_token\x18\x02 \x01(\tB\x03\xe0\x41\x02".\n\x18\x43heckConsistencyResponse\x12\x12\n\nconsistent\x18\x01 \x01(\x08"\xdc\x01\n\x14SnapshotTableRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x38\n\x07\x63luster\x18\x02 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster\x12\x18\n\x0bsnapshot_id\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12&\n\x03ttl\x18\x04 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x13\n\x0b\x64\x65scription\x18\x05 \x01(\t"L\n\x12GetSnapshotRequest\x12\x36\n\x04name\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Snapshot"v\n\x14ListSnapshotsRequest\x12\x37\n\x06parent\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t"g\n\x15ListSnapshotsResponse\x12\x35\n\tsnapshots\x18\x01 \x03(\x0b\x32".google.bigtable.admin.v2.Snapshot\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"O\n\x15\x44\x65leteSnapshotRequest\x12\x36\n\x04name\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Snapshot"\xc4\x01\n\x15SnapshotTableMetadata\x12H\n\x10original_request\x18\x01 \x01(\x0b\x32..google.bigtable.admin.v2.SnapshotTableRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xd8\x01\n\x1f\x43reateTableFromSnapshotMetadata\x12R\n\x10original_request\x18\x01 \x01(\x0b\x32\x38.google.bigtable.admin.v2.CreateTableFromSnapshotRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\x9d\x01\n\x13\x43reateBackupRequest\x12\x37\n\x06parent\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster\x12\x16\n\tbackup_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x35\n\x06\x62\x61\x63kup\x18\x03 \x01(\x0b\x32 .google.bigtable.admin.v2.BackupB\x03\xe0\x41\x02"\x98\x01\n\x14\x43reateBackupMetadata\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0csource_table\x18\x02 \x01(\t\x12.\n\nstart_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\x82\x01\n\x13UpdateBackupRequest\x12\x35\n\x06\x62\x61\x63kup\x18\x01 \x01(\x0b\x32 .google.bigtable.admin.v2.BackupB\x03\xe0\x41\x02\x12\x34\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02"H\n\x10GetBackupRequest\x12\x34\n\x04name\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1e\x62igtable.googleapis.com/Backup"K\n\x13\x44\x65leteBackupRequest\x12\x34\n\x04name\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1e\x62igtable.googleapis.com/Backup"\x96\x01\n\x12ListBackupsRequest\x12\x37\n\x06parent\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster\x12\x0e\n\x06\x66ilter\x18\x02 \x01(\t\x12\x10\n\x08order_by\x18\x03 \x01(\t\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x05 \x01(\t"a\n\x13ListBackupsResponse\x12\x31\n\x07\x62\x61\x63kups\x18\x01 \x03(\x0b\x32 .google.bigtable.admin.v2.Backup\x12\x17\n\x0fnext_page_token\x18\x02 
\x01(\t2\x98%\n\x12\x42igtableTableAdmin\x12\xab\x01\n\x0b\x43reateTable\x12,.google.bigtable.admin.v2.CreateTableRequest\x1a\x1f.google.bigtable.admin.v2.Table"M\x82\xd3\xe4\x93\x02/"*/v2/{parent=projects/*/instances/*}/tables:\x01*\xda\x41\x15parent,table_id,table\x12\x8a\x02\n\x17\x43reateTableFromSnapshot\x12\x38.google.bigtable.admin.v2.CreateTableFromSnapshotRequest\x1a\x1d.google.longrunning.Operation"\x95\x01\x82\xd3\xe4\x93\x02\x42"=/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot:\x01*\xda\x41\x1fparent,table_id,source_snapshot\xca\x41(\n\x05Table\x12\x1f\x43reateTableFromSnapshotMetadata\x12\xa4\x01\n\nListTables\x12+.google.bigtable.admin.v2.ListTablesRequest\x1a,.google.bigtable.admin.v2.ListTablesResponse";\x82\xd3\xe4\x93\x02,\x12*/v2/{parent=projects/*/instances/*}/tables\xda\x41\x06parent\x12\x91\x01\n\x08GetTable\x12).google.bigtable.admin.v2.GetTableRequest\x1a\x1f.google.bigtable.admin.v2.Table"9\x82\xd3\xe4\x93\x02,\x12*/v2/{name=projects/*/instances/*/tables/*}\xda\x41\x04name\x12\x8e\x01\n\x0b\x44\x65leteTable\x12,.google.bigtable.admin.v2.DeleteTableRequest\x1a\x16.google.protobuf.Empty"9\x82\xd3\xe4\x93\x02,**/v2/{name=projects/*/instances/*/tables/*}\xda\x41\x04name\x12\xcf\x01\n\x14ModifyColumnFamilies\x12\x35.google.bigtable.admin.v2.ModifyColumnFamiliesRequest\x1a\x1f.google.bigtable.admin.v2.Table"_\x82\xd3\xe4\x93\x02\x44"?/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies:\x01*\xda\x41\x12name,modifications\x12\x99\x01\n\x0c\x44ropRowRange\x12-.google.bigtable.admin.v2.DropRowRangeRequest\x1a\x16.google.protobuf.Empty"B\x82\xd3\xe4\x93\x02<"7/v2/{name=projects/*/instances/*/tables/*}:dropRowRange:\x01*\x12\xe8\x01\n\x18GenerateConsistencyToken\x12\x39.google.bigtable.admin.v2.GenerateConsistencyTokenRequest\x1a:.google.bigtable.admin.v2.GenerateConsistencyTokenResponse"U\x82\xd3\xe4\x93\x02H"C/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken:\x01*\xda\x41\x04name\x12\xda\x01\n\x10\x43heckCon
sistency\x12\x31.google.bigtable.admin.v2.CheckConsistencyRequest\x1a\x32.google.bigtable.admin.v2.CheckConsistencyResponse"_\x82\xd3\xe4\x93\x02@";/v2/{name=projects/*/instances/*/tables/*}:checkConsistency:\x01*\xda\x41\x16name,consistency_token\x12\xea\x01\n\rSnapshotTable\x12..google.bigtable.admin.v2.SnapshotTableRequest\x1a\x1d.google.longrunning.Operation"\x89\x01\x82\xd3\xe4\x93\x02\x38"3/v2/{name=projects/*/instances/*/tables/*}:snapshot:\x01*\xda\x41$name,cluster,snapshot_id,description\xca\x41!\n\x08Snapshot\x12\x15SnapshotTableMetadata\x12\xa8\x01\n\x0bGetSnapshot\x12,.google.bigtable.admin.v2.GetSnapshotRequest\x1a".google.bigtable.admin.v2.Snapshot"G\x82\xd3\xe4\x93\x02:\x12\x38/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}\xda\x41\x04name\x12\xbb\x01\n\rListSnapshots\x12..google.bigtable.admin.v2.ListSnapshotsRequest\x1a/.google.bigtable.admin.v2.ListSnapshotsResponse"I\x82\xd3\xe4\x93\x02:\x12\x38/v2/{parent=projects/*/instances/*/clusters/*}/snapshots\xda\x41\x06parent\x12\xa2\x01\n\x0e\x44\x65leteSnapshot\x12/.google.bigtable.admin.v2.DeleteSnapshotRequest\x1a\x16.google.protobuf.Empty"G\x82\xd3\xe4\x93\x02:*8/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}\xda\x41\x04name\x12\xe0\x01\n\x0c\x43reateBackup\x12-.google.bigtable.admin.v2.CreateBackupRequest\x1a\x1d.google.longrunning.Operation"\x81\x01\x82\xd3\xe4\x93\x02@"6/v2/{parent=projects/*/instances/*/clusters/*}/backups:\x06\x62\x61\x63kup\xda\x41\x17parent,backup_id,backup\xca\x41\x1e\n\x06\x42\x61\x63kup\x12\x14\x43reateBackupMetadata\x12\xa0\x01\n\tGetBackup\x12*.google.bigtable.admin.v2.GetBackupRequest\x1a .google.bigtable.admin.v2.Backup"E\x82\xd3\xe4\x93\x02\x38\x12\x36/v2/{name=projects/*/instances/*/clusters/*/backups/*}\xda\x41\x04name\x12\xc3\x01\n\x0cUpdateBackup\x12-.google.bigtable.admin.v2.UpdateBackupRequest\x1a 
.google.bigtable.admin.v2.Backup"b\x82\xd3\xe4\x93\x02G2=/v2/{backup.name=projects/*/instances/*/clusters/*/backups/*}:\x06\x62\x61\x63kup\xda\x41\x12\x62\x61\x63kup,update_mask\x12\x9c\x01\n\x0c\x44\x65leteBackup\x12-.google.bigtable.admin.v2.DeleteBackupRequest\x1a\x16.google.protobuf.Empty"E\x82\xd3\xe4\x93\x02\x38*6/v2/{name=projects/*/instances/*/clusters/*/backups/*}\xda\x41\x04name\x12\xb3\x01\n\x0bListBackups\x12,.google.bigtable.admin.v2.ListBackupsRequest\x1a-.google.bigtable.admin.v2.ListBackupsResponse"G\x82\xd3\xe4\x93\x02\x38\x12\x36/v2/{parent=projects/*/instances/*/clusters/*}/backups\xda\x41\x06parent\x12\xbb\x01\n\x0cRestoreTable\x12-.google.bigtable.admin.v2.RestoreTableRequest\x1a\x1d.google.longrunning.Operation"]\x82\xd3\xe4\x93\x02\x37"2/v2/{parent=projects/*/instances/*}/tables:restore:\x01*\xca\x41\x1d\n\x05Table\x12\x14RestoreTableMetadata\x12\xec\x01\n\x0cGetIamPolicy\x12".google.iam.v1.GetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"\xa0\x01\x82\xd3\xe4\x93\x02\x8e\x01";/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy:\x01*ZL"G/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:getIamPolicy:\x01*\xda\x41\x08resource\x12\xf3\x01\n\x0cSetIamPolicy\x12".google.iam.v1.SetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"\xa7\x01\x82\xd3\xe4\x93\x02\x8e\x01";/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy:\x01*ZL"G/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:setIamPolicy:\x01*\xda\x41\x0fresource,policy\x12\xa4\x02\n\x12TestIamPermissions\x12(.google.iam.v1.TestIamPermissionsRequest\x1a).google.iam.v1.TestIamPermissionsResponse"\xb8\x01\x82\xd3\xe4\x93\x02\x9a\x01"A/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions:\x01*ZR"M/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:testIamPermissions:\x01*\xda\x41\x14resource,permissions\x1a\xde\x02\xca\x41\x1c\x62igtableadmin.googleapis.com\xd2\x41\xbb\x02https://www.googleapis.com/auth/bigtable.admin,https://www.googleapis.com/auth/bigt
able.admin.table,https://www.googleapis.com/auth/cloud-bigtable.admin,https://www.googleapis.com/auth/cloud-bigtable.admin.table,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-onlyB\xdf\x01\n\x1c\x63om.google.bigtable.admin.v2B\x17\x42igtableTableAdminProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2\xea\x02"Google::Cloud::Bigtable::Admin::V2b\x06proto3', - dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - google_dot_api_dot_client__pb2.DESCRIPTOR, - google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, - google_dot_api_dot_resource__pb2.DESCRIPTOR, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_common__pb2.DESCRIPTOR, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.DESCRIPTOR, - google_dot_iam_dot_v1_dot_iam__policy__pb2.DESCRIPTOR, - google_dot_iam_dot_v1_dot_policy__pb2.DESCRIPTOR, - google_dot_longrunning_dot_operations__pb2.DESCRIPTOR, - google_dot_protobuf_dot_duration__pb2.DESCRIPTOR, - google_dot_protobuf_dot_empty__pb2.DESCRIPTOR, - google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR, - google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - ], -) - - -_RESTORETABLEREQUEST = _descriptor.Descriptor( - name="RestoreTableRequest", - full_name="google.bigtable.admin.v2.RestoreTableRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.RestoreTableRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Instance', - file=DESCRIPTOR, - 
create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="table_id", - full_name="google.bigtable.admin.v2.RestoreTableRequest.table_id", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="backup", - full_name="google.bigtable.admin.v2.RestoreTableRequest.backup", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\372A \n\036bigtable.googleapis.com/Backup", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="source", - full_name="google.bigtable.admin.v2.RestoreTableRequest.source", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=535, - serialized_end=702, -) - - -_RESTORETABLEMETADATA = _descriptor.Descriptor( - name="RestoreTableMetadata", - full_name="google.bigtable.admin.v2.RestoreTableMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.RestoreTableMetadata.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - 
is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="source_type", - full_name="google.bigtable.admin.v2.RestoreTableMetadata.source_type", - index=1, - number=2, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="backup_info", - full_name="google.bigtable.admin.v2.RestoreTableMetadata.backup_info", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="optimize_table_operation_name", - full_name="google.bigtable.admin.v2.RestoreTableMetadata.optimize_table_operation_name", - index=3, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="progress", - full_name="google.bigtable.admin.v2.RestoreTableMetadata.progress", - index=4, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - 
enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="source_info", - full_name="google.bigtable.admin.v2.RestoreTableMetadata.source_info", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=705, - serialized_end=985, -) - - -_OPTIMIZERESTOREDTABLEMETADATA = _descriptor.Descriptor( - name="OptimizeRestoredTableMetadata", - full_name="google.bigtable.admin.v2.OptimizeRestoredTableMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.OptimizeRestoredTableMetadata.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="progress", - full_name="google.bigtable.admin.v2.OptimizeRestoredTableMetadata.progress", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=987, - serialized_end=1095, -) - - -_CREATETABLEREQUEST_SPLIT = _descriptor.Descriptor( - name="Split", - full_name="google.bigtable.admin.v2.CreateTableRequest.Split", - filename=None, - file=DESCRIPTOR, - containing_type=None, - 
create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.bigtable.admin.v2.CreateTableRequest.Split.key", - index=0, - number=1, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1330, - serialized_end=1350, -) - -_CREATETABLEREQUEST = _descriptor.Descriptor( - name="CreateTableRequest", - full_name="google.bigtable.admin.v2.CreateTableRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.CreateTableRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Instance', - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="table_id", - full_name="google.bigtable.admin.v2.CreateTableRequest.table_id", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="table", - 
full_name="google.bigtable.admin.v2.CreateTableRequest.table", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="initial_splits", - full_name="google.bigtable.admin.v2.CreateTableRequest.initial_splits", - index=3, - number=4, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_CREATETABLEREQUEST_SPLIT,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1098, - serialized_end=1350, -) - - -_CREATETABLEFROMSNAPSHOTREQUEST = _descriptor.Descriptor( - name="CreateTableFromSnapshotRequest", - full_name="google.bigtable.admin.v2.CreateTableFromSnapshotRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.CreateTableFromSnapshotRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Instance', - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="table_id", - full_name="google.bigtable.admin.v2.CreateTableFromSnapshotRequest.table_id", - 
index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="source_snapshot", - full_name="google.bigtable.admin.v2.CreateTableFromSnapshotRequest.source_snapshot", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Snapshot', - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1353, - serialized_end=1533, -) - - -_DROPROWRANGEREQUEST = _descriptor.Descriptor( - name="DropRowRangeRequest", - full_name="google.bigtable.admin.v2.DropRowRangeRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.DropRowRangeRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="row_key_prefix", - full_name="google.bigtable.admin.v2.DropRowRangeRequest.row_key_prefix", - index=1, - number=2, - type=12, - 
cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="delete_all_data_from_table", - full_name="google.bigtable.admin.v2.DropRowRangeRequest.delete_all_data_from_table", - index=2, - number=3, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="target", - full_name="google.bigtable.admin.v2.DropRowRangeRequest.target", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=1536, - serialized_end=1684, -) - - -_LISTTABLESREQUEST = _descriptor.Descriptor( - name="ListTablesRequest", - full_name="google.bigtable.admin.v2.ListTablesRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.ListTablesRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Instance', - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="view", - 
full_name="google.bigtable.admin.v2.ListTablesRequest.view", - index=1, - number=2, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.bigtable.admin.v2.ListTablesRequest.page_size", - index=2, - number=4, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.bigtable.admin.v2.ListTablesRequest.page_token", - index=3, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1687, - serialized_end=1855, -) - - -_LISTTABLESRESPONSE = _descriptor.Descriptor( - name="ListTablesResponse", - full_name="google.bigtable.admin.v2.ListTablesResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="tables", - full_name="google.bigtable.admin.v2.ListTablesResponse.tables", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - 
containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.bigtable.admin.v2.ListTablesResponse.next_page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1857, - serialized_end=1951, -) - - -_GETTABLEREQUEST = _descriptor.Descriptor( - name="GetTableRequest", - full_name="google.bigtable.admin.v2.GetTableRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.GetTableRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="view", - full_name="google.bigtable.admin.v2.GetTableRequest.view", - index=1, - number=2, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - 
], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1953, - serialized_end=2075, -) - - -_DELETETABLEREQUEST = _descriptor.Descriptor( - name="DeleteTableRequest", - full_name="google.bigtable.admin.v2.DeleteTableRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.DeleteTableRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2077, - serialized_end=2150, -) - - -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION = _descriptor.Descriptor( - name="Modification", - full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="id", - full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="create", - 
full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.create", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="update", - full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.update", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="drop", - full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.drop", - index=3, - number=4, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="mod", - full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.mod", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=2334, - serialized_end=2499, -) - -_MODIFYCOLUMNFAMILIESREQUEST = _descriptor.Descriptor( - name="ModifyColumnFamiliesRequest", - full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - 
create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="modifications", - full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.modifications", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2153, - serialized_end=2499, -) - - -_GENERATECONSISTENCYTOKENREQUEST = _descriptor.Descriptor( - name="GenerateConsistencyTokenRequest", - full_name="google.bigtable.admin.v2.GenerateConsistencyTokenRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.GenerateConsistencyTokenRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", 
- file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2501, - serialized_end=2587, -) - - -_GENERATECONSISTENCYTOKENRESPONSE = _descriptor.Descriptor( - name="GenerateConsistencyTokenResponse", - full_name="google.bigtable.admin.v2.GenerateConsistencyTokenResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="consistency_token", - full_name="google.bigtable.admin.v2.GenerateConsistencyTokenResponse.consistency_token", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2589, - serialized_end=2650, -) - - -_CHECKCONSISTENCYREQUEST = _descriptor.Descriptor( - name="CheckConsistencyRequest", - full_name="google.bigtable.admin.v2.CheckConsistencyRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.CheckConsistencyRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", - file=DESCRIPTOR, - 
create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="consistency_token", - full_name="google.bigtable.admin.v2.CheckConsistencyRequest.consistency_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2652, - serialized_end=2762, -) - - -_CHECKCONSISTENCYRESPONSE = _descriptor.Descriptor( - name="CheckConsistencyResponse", - full_name="google.bigtable.admin.v2.CheckConsistencyResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="consistent", - full_name="google.bigtable.admin.v2.CheckConsistencyResponse.consistent", - index=0, - number=1, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2764, - serialized_end=2810, -) - - -_SNAPSHOTTABLEREQUEST = _descriptor.Descriptor( - name="SnapshotTableRequest", - full_name="google.bigtable.admin.v2.SnapshotTableRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - 
full_name="google.bigtable.admin.v2.SnapshotTableRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cluster", - full_name="google.bigtable.admin.v2.SnapshotTableRequest.cluster", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A!\n\037bigtable.googleapis.com/Cluster", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="snapshot_id", - full_name="google.bigtable.admin.v2.SnapshotTableRequest.snapshot_id", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="ttl", - full_name="google.bigtable.admin.v2.SnapshotTableRequest.ttl", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="description", - full_name="google.bigtable.admin.v2.SnapshotTableRequest.description", - index=4, - number=5, - type=9, - 
cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2813, - serialized_end=3033, -) - - -_GETSNAPSHOTREQUEST = _descriptor.Descriptor( - name="GetSnapshotRequest", - full_name="google.bigtable.admin.v2.GetSnapshotRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.GetSnapshotRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Snapshot', - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3035, - serialized_end=3111, -) - - -_LISTSNAPSHOTSREQUEST = _descriptor.Descriptor( - name="ListSnapshotsRequest", - full_name="google.bigtable.admin.v2.ListSnapshotsRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.ListSnapshotsRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - 
enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A!\n\037bigtable.googleapis.com/Cluster", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.bigtable.admin.v2.ListSnapshotsRequest.page_size", - index=1, - number=2, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.bigtable.admin.v2.ListSnapshotsRequest.page_token", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3113, - serialized_end=3231, -) - - -_LISTSNAPSHOTSRESPONSE = _descriptor.Descriptor( - name="ListSnapshotsResponse", - full_name="google.bigtable.admin.v2.ListSnapshotsResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="snapshots", - full_name="google.bigtable.admin.v2.ListSnapshotsResponse.snapshots", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - 
create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.bigtable.admin.v2.ListSnapshotsResponse.next_page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3233, - serialized_end=3336, -) - - -_DELETESNAPSHOTREQUEST = _descriptor.Descriptor( - name="DeleteSnapshotRequest", - full_name="google.bigtable.admin.v2.DeleteSnapshotRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.DeleteSnapshotRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Snapshot', - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3338, - serialized_end=3417, -) - - -_SNAPSHOTTABLEMETADATA = _descriptor.Descriptor( - name="SnapshotTableMetadata", - full_name="google.bigtable.admin.v2.SnapshotTableMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - 
name="original_request", - full_name="google.bigtable.admin.v2.SnapshotTableMetadata.original_request", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="request_time", - full_name="google.bigtable.admin.v2.SnapshotTableMetadata.request_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="finish_time", - full_name="google.bigtable.admin.v2.SnapshotTableMetadata.finish_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3420, - serialized_end=3616, -) - - -_CREATETABLEFROMSNAPSHOTMETADATA = _descriptor.Descriptor( - name="CreateTableFromSnapshotMetadata", - full_name="google.bigtable.admin.v2.CreateTableFromSnapshotMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="original_request", - full_name="google.bigtable.admin.v2.CreateTableFromSnapshotMetadata.original_request", - index=0, - number=1, - type=11, 
- cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="request_time", - full_name="google.bigtable.admin.v2.CreateTableFromSnapshotMetadata.request_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="finish_time", - full_name="google.bigtable.admin.v2.CreateTableFromSnapshotMetadata.finish_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3619, - serialized_end=3835, -) - - -_CREATEBACKUPREQUEST = _descriptor.Descriptor( - name="CreateBackupRequest", - full_name="google.bigtable.admin.v2.CreateBackupRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.CreateBackupRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - 
extension_scope=None, - serialized_options=b"\340A\002\372A!\n\037bigtable.googleapis.com/Cluster", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="backup_id", - full_name="google.bigtable.admin.v2.CreateBackupRequest.backup_id", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="backup", - full_name="google.bigtable.admin.v2.CreateBackupRequest.backup", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3838, - serialized_end=3995, -) - - -_CREATEBACKUPMETADATA = _descriptor.Descriptor( - name="CreateBackupMetadata", - full_name="google.bigtable.admin.v2.CreateBackupMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.CreateBackupMetadata.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - 
_descriptor.FieldDescriptor( - name="source_table", - full_name="google.bigtable.admin.v2.CreateBackupMetadata.source_table", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="start_time", - full_name="google.bigtable.admin.v2.CreateBackupMetadata.start_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="end_time", - full_name="google.bigtable.admin.v2.CreateBackupMetadata.end_time", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3998, - serialized_end=4150, -) - - -_UPDATEBACKUPREQUEST = _descriptor.Descriptor( - name="UpdateBackupRequest", - full_name="google.bigtable.admin.v2.UpdateBackupRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="backup", - full_name="google.bigtable.admin.v2.UpdateBackupRequest.backup", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - 
has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="update_mask", - full_name="google.bigtable.admin.v2.UpdateBackupRequest.update_mask", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=4153, - serialized_end=4283, -) - - -_GETBACKUPREQUEST = _descriptor.Descriptor( - name="GetBackupRequest", - full_name="google.bigtable.admin.v2.GetBackupRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.GetBackupRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A \n\036bigtable.googleapis.com/Backup", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=4285, - serialized_end=4357, -) - - -_DELETEBACKUPREQUEST = _descriptor.Descriptor( - name="DeleteBackupRequest", - 
full_name="google.bigtable.admin.v2.DeleteBackupRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.DeleteBackupRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A \n\036bigtable.googleapis.com/Backup", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=4359, - serialized_end=4434, -) - - -_LISTBACKUPSREQUEST = _descriptor.Descriptor( - name="ListBackupsRequest", - full_name="google.bigtable.admin.v2.ListBackupsRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.ListBackupsRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A!\n\037bigtable.googleapis.com/Cluster", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="filter", - full_name="google.bigtable.admin.v2.ListBackupsRequest.filter", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - 
serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="order_by", - full_name="google.bigtable.admin.v2.ListBackupsRequest.order_by", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.bigtable.admin.v2.ListBackupsRequest.page_size", - index=3, - number=4, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.bigtable.admin.v2.ListBackupsRequest.page_token", - index=4, - number=5, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=4437, - serialized_end=4587, -) - - -_LISTBACKUPSRESPONSE = _descriptor.Descriptor( - name="ListBackupsResponse", - full_name="google.bigtable.admin.v2.ListBackupsResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="backups", - 
full_name="google.bigtable.admin.v2.ListBackupsResponse.backups", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.bigtable.admin.v2.ListBackupsResponse.next_page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=4589, - serialized_end=4686, -) - -_RESTORETABLEREQUEST.oneofs_by_name["source"].fields.append( - _RESTORETABLEREQUEST.fields_by_name["backup"] -) -_RESTORETABLEREQUEST.fields_by_name[ - "backup" -].containing_oneof = _RESTORETABLEREQUEST.oneofs_by_name["source"] -_RESTORETABLEMETADATA.fields_by_name[ - "source_type" -].enum_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._RESTORESOURCETYPE -) -_RESTORETABLEMETADATA.fields_by_name[ - "backup_info" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._BACKUPINFO -) -_RESTORETABLEMETADATA.fields_by_name[ - "progress" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_common__pb2._OPERATIONPROGRESS -) -_RESTORETABLEMETADATA.oneofs_by_name["source_info"].fields.append( - _RESTORETABLEMETADATA.fields_by_name["backup_info"] -) -_RESTORETABLEMETADATA.fields_by_name[ - "backup_info" -].containing_oneof = 
_RESTORETABLEMETADATA.oneofs_by_name["source_info"] -_OPTIMIZERESTOREDTABLEMETADATA.fields_by_name[ - "progress" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_common__pb2._OPERATIONPROGRESS -) -_CREATETABLEREQUEST_SPLIT.containing_type = _CREATETABLEREQUEST -_CREATETABLEREQUEST.fields_by_name[ - "table" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._TABLE -) -_CREATETABLEREQUEST.fields_by_name[ - "initial_splits" -].message_type = _CREATETABLEREQUEST_SPLIT -_DROPROWRANGEREQUEST.oneofs_by_name["target"].fields.append( - _DROPROWRANGEREQUEST.fields_by_name["row_key_prefix"] -) -_DROPROWRANGEREQUEST.fields_by_name[ - "row_key_prefix" -].containing_oneof = _DROPROWRANGEREQUEST.oneofs_by_name["target"] -_DROPROWRANGEREQUEST.oneofs_by_name["target"].fields.append( - _DROPROWRANGEREQUEST.fields_by_name["delete_all_data_from_table"] -) -_DROPROWRANGEREQUEST.fields_by_name[ - "delete_all_data_from_table" -].containing_oneof = _DROPROWRANGEREQUEST.oneofs_by_name["target"] -_LISTTABLESREQUEST.fields_by_name[ - "view" -].enum_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._TABLE_VIEW -) -_LISTTABLESRESPONSE.fields_by_name[ - "tables" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._TABLE -) -_GETTABLEREQUEST.fields_by_name[ - "view" -].enum_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._TABLE_VIEW -) -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name[ - "create" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._COLUMNFAMILY -) -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name[ - "update" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._COLUMNFAMILY -) -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.containing_type = _MODIFYCOLUMNFAMILIESREQUEST 
-_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"].fields.append( - _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name["create"] -) -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name[ - "create" -].containing_oneof = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"] -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"].fields.append( - _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name["update"] -) -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name[ - "update" -].containing_oneof = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"] -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"].fields.append( - _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name["drop"] -) -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name[ - "drop" -].containing_oneof = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"] -_MODIFYCOLUMNFAMILIESREQUEST.fields_by_name[ - "modifications" -].message_type = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION -_SNAPSHOTTABLEREQUEST.fields_by_name[ - "ttl" -].message_type = google_dot_protobuf_dot_duration__pb2._DURATION -_LISTSNAPSHOTSRESPONSE.fields_by_name[ - "snapshots" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._SNAPSHOT -) -_SNAPSHOTTABLEMETADATA.fields_by_name[ - "original_request" -].message_type = _SNAPSHOTTABLEREQUEST -_SNAPSHOTTABLEMETADATA.fields_by_name[ - "request_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_SNAPSHOTTABLEMETADATA.fields_by_name[ - "finish_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CREATETABLEFROMSNAPSHOTMETADATA.fields_by_name[ - "original_request" -].message_type = _CREATETABLEFROMSNAPSHOTREQUEST -_CREATETABLEFROMSNAPSHOTMETADATA.fields_by_name[ - "request_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CREATETABLEFROMSNAPSHOTMETADATA.fields_by_name[ - "finish_time" -].message_type 
= google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CREATEBACKUPREQUEST.fields_by_name[ - "backup" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._BACKUP -) -_CREATEBACKUPMETADATA.fields_by_name[ - "start_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CREATEBACKUPMETADATA.fields_by_name[ - "end_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_UPDATEBACKUPREQUEST.fields_by_name[ - "backup" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._BACKUP -) -_UPDATEBACKUPREQUEST.fields_by_name[ - "update_mask" -].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK -_LISTBACKUPSRESPONSE.fields_by_name[ - "backups" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._BACKUP -) -DESCRIPTOR.message_types_by_name["RestoreTableRequest"] = _RESTORETABLEREQUEST -DESCRIPTOR.message_types_by_name["RestoreTableMetadata"] = _RESTORETABLEMETADATA -DESCRIPTOR.message_types_by_name[ - "OptimizeRestoredTableMetadata" -] = _OPTIMIZERESTOREDTABLEMETADATA -DESCRIPTOR.message_types_by_name["CreateTableRequest"] = _CREATETABLEREQUEST -DESCRIPTOR.message_types_by_name[ - "CreateTableFromSnapshotRequest" -] = _CREATETABLEFROMSNAPSHOTREQUEST -DESCRIPTOR.message_types_by_name["DropRowRangeRequest"] = _DROPROWRANGEREQUEST -DESCRIPTOR.message_types_by_name["ListTablesRequest"] = _LISTTABLESREQUEST -DESCRIPTOR.message_types_by_name["ListTablesResponse"] = _LISTTABLESRESPONSE -DESCRIPTOR.message_types_by_name["GetTableRequest"] = _GETTABLEREQUEST -DESCRIPTOR.message_types_by_name["DeleteTableRequest"] = _DELETETABLEREQUEST -DESCRIPTOR.message_types_by_name[ - "ModifyColumnFamiliesRequest" -] = _MODIFYCOLUMNFAMILIESREQUEST -DESCRIPTOR.message_types_by_name[ - "GenerateConsistencyTokenRequest" -] = _GENERATECONSISTENCYTOKENREQUEST -DESCRIPTOR.message_types_by_name[ - "GenerateConsistencyTokenResponse" -] = 
_GENERATECONSISTENCYTOKENRESPONSE -DESCRIPTOR.message_types_by_name["CheckConsistencyRequest"] = _CHECKCONSISTENCYREQUEST -DESCRIPTOR.message_types_by_name["CheckConsistencyResponse"] = _CHECKCONSISTENCYRESPONSE -DESCRIPTOR.message_types_by_name["SnapshotTableRequest"] = _SNAPSHOTTABLEREQUEST -DESCRIPTOR.message_types_by_name["GetSnapshotRequest"] = _GETSNAPSHOTREQUEST -DESCRIPTOR.message_types_by_name["ListSnapshotsRequest"] = _LISTSNAPSHOTSREQUEST -DESCRIPTOR.message_types_by_name["ListSnapshotsResponse"] = _LISTSNAPSHOTSRESPONSE -DESCRIPTOR.message_types_by_name["DeleteSnapshotRequest"] = _DELETESNAPSHOTREQUEST -DESCRIPTOR.message_types_by_name["SnapshotTableMetadata"] = _SNAPSHOTTABLEMETADATA -DESCRIPTOR.message_types_by_name[ - "CreateTableFromSnapshotMetadata" -] = _CREATETABLEFROMSNAPSHOTMETADATA -DESCRIPTOR.message_types_by_name["CreateBackupRequest"] = _CREATEBACKUPREQUEST -DESCRIPTOR.message_types_by_name["CreateBackupMetadata"] = _CREATEBACKUPMETADATA -DESCRIPTOR.message_types_by_name["UpdateBackupRequest"] = _UPDATEBACKUPREQUEST -DESCRIPTOR.message_types_by_name["GetBackupRequest"] = _GETBACKUPREQUEST -DESCRIPTOR.message_types_by_name["DeleteBackupRequest"] = _DELETEBACKUPREQUEST -DESCRIPTOR.message_types_by_name["ListBackupsRequest"] = _LISTBACKUPSREQUEST -DESCRIPTOR.message_types_by_name["ListBackupsResponse"] = _LISTBACKUPSRESPONSE -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -RestoreTableRequest = _reflection.GeneratedProtocolMessageType( - "RestoreTableRequest", - (_message.Message,), - { - "DESCRIPTOR": _RESTORETABLEREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """The request for [RestoreTable][google.bigtable.admin.v2.BigtableTableA - dmin.RestoreTable]. - - Attributes: - parent: - Required. The name of the instance in which to create the - restored table. This instance must be the parent of the source - backup. Values are of the form - ``projects//instances/``. - table_id: - Required. 
The id of the table to create and restore to. This - table must not already exist. The ``table_id`` appended to - ``parent`` forms the full table name of the form - ``projects//instances//tables/``. - source: - Required. The source from which to restore. - backup: - Name of the backup from which to restore. Values are of the - form ``projects//instances//clusters//backups/``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.RestoreTableRequest) - }, -) -_sym_db.RegisterMessage(RestoreTableRequest) - -RestoreTableMetadata = _reflection.GeneratedProtocolMessageType( - "RestoreTableMetadata", - (_message.Message,), - { - "DESCRIPTOR": _RESTORETABLEMETADATA, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Metadata type for the long-running operation returned by [RestoreTable - ][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. - - Attributes: - name: - Name of the table being created and restored to. - source_type: - The type of the restore source. - source_info: - Information about the source used to restore the table, as - specified by ``source`` in [RestoreTableRequest][google.bigtab - le.admin.v2.RestoreTableRequest]. - optimize_table_operation_name: - If exists, the name of the long-running operation that will be - used to track the post-restore optimization process to - optimize the performance of the restored table. The metadata - type of the long-running operation is - [OptimizeRestoreTableMetadata][]. The response type is - [Empty][google.protobuf.Empty]. This long-running operation - may be automatically created by the system if applicable after - the RestoreTable long-running operation completes - successfully. This operation may not be created if the table - is already optimized or the restore was not successful. - progress: - The progress of the [RestoreTable][google.bigtable.admin.v2.Bi - gtableTableAdmin.RestoreTable] operation. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.RestoreTableMetadata) - }, -) -_sym_db.RegisterMessage(RestoreTableMetadata) - -OptimizeRestoredTableMetadata = _reflection.GeneratedProtocolMessageType( - "OptimizeRestoredTableMetadata", - (_message.Message,), - { - "DESCRIPTOR": _OPTIMIZERESTOREDTABLEMETADATA, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Metadata type for the long-running operation used to track the - progress of optimizations performed on a newly restored table. This - long-running operation is automatically created by the system after - the successful completion of a table restore, and cannot be cancelled. - - Attributes: - name: - Name of the restored table being optimized. - progress: - The progress of the post-restore optimizations. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.OptimizeRestoredTableMetadata) - }, -) -_sym_db.RegisterMessage(OptimizeRestoredTableMetadata) - -CreateTableRequest = _reflection.GeneratedProtocolMessageType( - "CreateTableRequest", - (_message.Message,), - { - "Split": _reflection.GeneratedProtocolMessageType( - "Split", - (_message.Message,), - { - "DESCRIPTOR": _CREATETABLEREQUEST_SPLIT, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """An initial split point for a newly created table. - - Attributes: - key: - Row key to use as an initial tablet boundary. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableRequest.Split) - }, - ), - "DESCRIPTOR": _CREATETABLEREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.Creat - eTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] - - Attributes: - parent: - Required. The unique name of the instance in which to create - the table. 
Values are of the form - ``projects/{project}/instances/{instance}``. - table_id: - Required. The name by which the new table should be referred - to within the parent instance, e.g., ``foobar`` rather than - ``{parent}/tables/foobar``. Maximum 50 characters. - table: - Required. The Table to create. - initial_splits: - The optional list of row keys that will be used to initially - split the table into several tablets (tablets are similar to - HBase regions). Given two split keys, ``s1`` and ``s2``, three - tablets will be created, spanning the key ranges: ``[, s1), - [s1, s2), [s2, )``. Example: - Row keys := ``["a", "apple", - "custom", "customer_1", "customer_2",`` ``"other", "zz"]`` - - initial_split_keys := ``["apple", "customer_1", - "customer_2", "other"]`` - Key assignment: - Tablet 1 - ``[, apple) => {"a"}.`` - Tablet 2 - ``[apple, customer_1) => {"apple", "custom"}.`` - - Tablet 3 ``[customer_1, customer_2) => {"customer_1"}.`` - - Tablet 4 ``[customer_2, other) => {"customer_2"}.`` - - Tablet 5 ``[other, ) => {"other", "zz"}.`` - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableRequest) - }, -) -_sym_db.RegisterMessage(CreateTableRequest) -_sym_db.RegisterMessage(CreateTableRequest.Split) - -CreateTableFromSnapshotRequest = _reflection.GeneratedProtocolMessageType( - "CreateTableFromSnapshotRequest", - (_message.Message,), - { - "DESCRIPTOR": _CREATETABLEFROMSNAPSHOTREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.Creat - eTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.Create - TableFromSnapshot] Note: This is a private alpha release of Cloud - Bigtable snapshots. This feature is not currently available to most - Cloud Bigtable customers. This feature might be changed in backward- - incompatible ways and is not recommended for production use. 
It is not - subject to any SLA or deprecation policy. - - Attributes: - parent: - Required. The unique name of the instance in which to create - the table. Values are of the form - ``projects/{project}/instances/{instance}``. - table_id: - Required. The name by which the new table should be referred - to within the parent instance, e.g., ``foobar`` rather than - ``{parent}/tables/foobar``. - source_snapshot: - Required. The unique name of the snapshot from which to - restore the table. The snapshot and the table must be in the - same instance. Values are of the form ``projects/{project}/ins - tances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableFromSnapshotRequest) - }, -) -_sym_db.RegisterMessage(CreateTableFromSnapshotRequest) - -DropRowRangeRequest = _reflection.GeneratedProtocolMessageType( - "DropRowRangeRequest", - (_message.Message,), - { - "DESCRIPTOR": _DROPROWRANGEREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.DropR - owRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] - - Attributes: - name: - Required. The unique name of the table on which to drop a - range of rows. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - target: - Delete all rows or by prefix. - row_key_prefix: - Delete all rows that start with this row key prefix. Prefix - cannot be zero length. - delete_all_data_from_table: - Delete all rows in the table. Setting this to false is a no- - op. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DropRowRangeRequest) - }, -) -_sym_db.RegisterMessage(DropRowRangeRequest) - -ListTablesRequest = _reflection.GeneratedProtocolMessageType( - "ListTablesRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTTABLESREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.ListT - ables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] - - Attributes: - parent: - Required. The unique name of the instance for which tables - should be listed. Values are of the form - ``projects/{project}/instances/{instance}``. - view: - The view to be applied to the returned tables’ fields. Only - NAME_ONLY view (default) and REPLICATION_VIEW are supported. - page_size: - Maximum number of results per page. A page_size of zero lets - the server choose the number of items to return. A page_size - which is strictly positive will return at most that many - items. A negative page_size will cause an error. Following - the first request, subsequent paginated calls are not required - to pass a page_size. If a page_size is set in subsequent - calls, it must match the page_size given in the first request. - page_token: - The value of ``next_page_token`` returned by a previous call. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListTablesRequest) - }, -) -_sym_db.RegisterMessage(ListTablesRequest) - -ListTablesResponse = _reflection.GeneratedProtocolMessageType( - "ListTablesResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTTABLESRESPONSE, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Response message for [google.bigtable.admin.v2.BigtableTableAdmin.List - Tables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] - - Attributes: - tables: - The tables present in the requested instance. 
- next_page_token: - Set if not all tables could be returned in a single response. - Pass this value to ``page_token`` in another request to get - the next page of results. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListTablesResponse) - }, -) -_sym_db.RegisterMessage(ListTablesResponse) - -GetTableRequest = _reflection.GeneratedProtocolMessageType( - "GetTableRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETTABLEREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.GetTa - ble][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] - - Attributes: - name: - Required. The unique name of the requested table. Values are - of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - view: - The view to be applied to the returned table’s fields. - Defaults to ``SCHEMA_VIEW`` if unspecified. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetTableRequest) - }, -) -_sym_db.RegisterMessage(GetTableRequest) - -DeleteTableRequest = _reflection.GeneratedProtocolMessageType( - "DeleteTableRequest", - (_message.Message,), - { - "DESCRIPTOR": _DELETETABLEREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.Delet - eTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] - - Attributes: - name: - Required. The unique name of the table to be deleted. Values - are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteTableRequest) - }, -) -_sym_db.RegisterMessage(DeleteTableRequest) - -ModifyColumnFamiliesRequest = _reflection.GeneratedProtocolMessageType( - "ModifyColumnFamiliesRequest", - (_message.Message,), - { - "Modification": _reflection.GeneratedProtocolMessageType( - "Modification", - (_message.Message,), - { - "DESCRIPTOR": _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """A create, update, or delete of a particular column family. - - Attributes: - id: - The ID of the column family to be modified. - mod: - Column familiy modifications. - create: - Create a new column family with the specified schema, or fail - if one already exists with the given ID. - update: - Update an existing column family to the specified schema, or - fail if no column family exists with the given ID. - drop: - Drop (delete) the column family with the given ID, or fail if - no such family exists. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification) - }, - ), - "DESCRIPTOR": _MODIFYCOLUMNFAMILIESREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.Modif - yColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyCol - umnFamilies] - - Attributes: - name: - Required. The unique name of the table whose families should - be modified. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - modifications: - Required. Modifications to be atomically applied to the - specified table’s families. Entries are applied in order, - meaning that earlier modifications can be masked by later ones - (in the case of repeated updates to the same family, for - example). 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ModifyColumnFamiliesRequest) - }, -) -_sym_db.RegisterMessage(ModifyColumnFamiliesRequest) -_sym_db.RegisterMessage(ModifyColumnFamiliesRequest.Modification) - -GenerateConsistencyTokenRequest = _reflection.GeneratedProtocolMessageType( - "GenerateConsistencyTokenRequest", - (_message.Message,), - { - "DESCRIPTOR": _GENERATECONSISTENCYTOKENREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.Gener - ateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.Gener - ateConsistencyToken] - - Attributes: - name: - Required. The unique name of the Table for which to create a - consistency token. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GenerateConsistencyTokenRequest) - }, -) -_sym_db.RegisterMessage(GenerateConsistencyTokenRequest) - -GenerateConsistencyTokenResponse = _reflection.GeneratedProtocolMessageType( - "GenerateConsistencyTokenResponse", - (_message.Message,), - { - "DESCRIPTOR": _GENERATECONSISTENCYTOKENRESPONSE, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Response message for [google.bigtable.admin.v2.BigtableTableAdmin.Gene - rateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.Gene - rateConsistencyToken] - - Attributes: - consistency_token: - The generated consistency token. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GenerateConsistencyTokenResponse) - }, -) -_sym_db.RegisterMessage(GenerateConsistencyTokenResponse) - -CheckConsistencyRequest = _reflection.GeneratedProtocolMessageType( - "CheckConsistencyRequest", - (_message.Message,), - { - "DESCRIPTOR": _CHECKCONSISTENCYREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.Check - Consistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsiste - ncy] - - Attributes: - name: - Required. The unique name of the Table for which to check - replication consistency. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - consistency_token: - Required. The token created using GenerateConsistencyToken for - the Table. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CheckConsistencyRequest) - }, -) -_sym_db.RegisterMessage(CheckConsistencyRequest) - -CheckConsistencyResponse = _reflection.GeneratedProtocolMessageType( - "CheckConsistencyResponse", - (_message.Message,), - { - "DESCRIPTOR": _CHECKCONSISTENCYRESPONSE, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Response message for [google.bigtable.admin.v2.BigtableTableAdmin.Chec - kConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsist - ency] - - Attributes: - consistent: - True only if the token is consistent. A token is consistent if - replication has caught up with the restrictions specified in - the request. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CheckConsistencyResponse) - }, -) -_sym_db.RegisterMessage(CheckConsistencyResponse) - -SnapshotTableRequest = _reflection.GeneratedProtocolMessageType( - "SnapshotTableRequest", - (_message.Message,), - { - "DESCRIPTOR": _SNAPSHOTTABLEREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.Snaps - hotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable] - Note: This is a private alpha release of Cloud Bigtable snapshots. - This feature is not currently available to most Cloud Bigtable - customers. This feature might be changed in backward-incompatible ways - and is not recommended for production use. It is not subject to any - SLA or deprecation policy. - - Attributes: - name: - Required. The unique name of the table to have the snapshot - taken. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - cluster: - Required. The name of the cluster where the snapshot will be - created in. Values are of the form ``projects/{project}/instan - ces/{instance}/clusters/{cluster}``. - snapshot_id: - Required. The ID by which the new snapshot should be referred - to within the parent cluster, e.g., ``mysnapshot`` of the - form: ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` rather than ``projects/{ - project}/instances/{instance}/clusters/{cluster}/snapshots/mys - napshot``. - ttl: - The amount of time that the new snapshot can stay active after - it is created. Once ‘ttl’ expires, the snapshot will get - deleted. The maximum amount of time a snapshot can stay active - is 7 days. If ‘ttl’ is not specified, the default value of 24 - hours will be used. - description: - Description of the snapshot. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.SnapshotTableRequest) - }, -) -_sym_db.RegisterMessage(SnapshotTableRequest) - -GetSnapshotRequest = _reflection.GeneratedProtocolMessageType( - "GetSnapshotRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETSNAPSHOTREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.GetSn - apshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] - Note: This is a private alpha release of Cloud Bigtable snapshots. - This feature is not currently available to most Cloud Bigtable - customers. This feature might be changed in backward-incompatible ways - and is not recommended for production use. It is not subject to any - SLA or deprecation policy. - - Attributes: - name: - Required. The unique name of the requested snapshot. Values - are of the form ``projects/{project}/instances/{instance}/clus - ters/{cluster}/snapshots/{snapshot}``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetSnapshotRequest) - }, -) -_sym_db.RegisterMessage(GetSnapshotRequest) - -ListSnapshotsRequest = _reflection.GeneratedProtocolMessageType( - "ListSnapshotsRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTSNAPSHOTSREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.ListS - napshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] - Note: This is a private alpha release of Cloud Bigtable snapshots. - This feature is not currently available to most Cloud Bigtable - customers. This feature might be changed in backward-incompatible ways - and is not recommended for production use. It is not subject to any - SLA or deprecation policy. - - Attributes: - parent: - Required. The unique name of the cluster for which snapshots - should be listed. 
Values are of the form ``projects/{project}/ - instances/{instance}/clusters/{cluster}``. Use ``{cluster} = - '-'`` to list snapshots for all clusters in an instance, e.g., - ``projects/{project}/instances/{instance}/clusters/-``. - page_size: - The maximum number of snapshots to return per page. CURRENTLY - UNIMPLEMENTED AND IGNORED. - page_token: - The value of ``next_page_token`` returned by a previous call. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListSnapshotsRequest) - }, -) -_sym_db.RegisterMessage(ListSnapshotsRequest) - -ListSnapshotsResponse = _reflection.GeneratedProtocolMessageType( - "ListSnapshotsResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTSNAPSHOTSRESPONSE, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Response message for [google.bigtable.admin.v2.BigtableTableAdmin.List - Snapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] - Note: This is a private alpha release of Cloud Bigtable snapshots. - This feature is not currently available to most Cloud Bigtable - customers. This feature might be changed in backward-incompatible ways - and is not recommended for production use. It is not subject to any - SLA or deprecation policy. - - Attributes: - snapshots: - The snapshots present in the requested cluster. - next_page_token: - Set if not all snapshots could be returned in a single - response. Pass this value to ``page_token`` in another request - to get the next page of results. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListSnapshotsResponse) - }, -) -_sym_db.RegisterMessage(ListSnapshotsResponse) - -DeleteSnapshotRequest = _reflection.GeneratedProtocolMessageType( - "DeleteSnapshotRequest", - (_message.Message,), - { - "DESCRIPTOR": _DELETESNAPSHOTREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.Delet - eSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] - Note: This is a private alpha release of Cloud Bigtable snapshots. - This feature is not currently available to most Cloud Bigtable - customers. This feature might be changed in backward-incompatible ways - and is not recommended for production use. It is not subject to any - SLA or deprecation policy. - - Attributes: - name: - Required. The unique name of the snapshot to be deleted. - Values are of the form ``projects/{project}/instances/{instanc - e}/clusters/{cluster}/snapshots/{snapshot}``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteSnapshotRequest) - }, -) -_sym_db.RegisterMessage(DeleteSnapshotRequest) - -SnapshotTableMetadata = _reflection.GeneratedProtocolMessageType( - "SnapshotTableMetadata", - (_message.Message,), - { - "DESCRIPTOR": _SNAPSHOTTABLEMETADATA, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """The metadata for the Operation returned by SnapshotTable. Note: This - is a private alpha release of Cloud Bigtable snapshots. This feature - is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or - deprecation policy. - - Attributes: - original_request: - The request that prompted the initiation of this SnapshotTable - operation. 
- request_time: - The time at which the original request was received. - finish_time: - The time at which the operation failed or was completed - successfully. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.SnapshotTableMetadata) - }, -) -_sym_db.RegisterMessage(SnapshotTableMetadata) - -CreateTableFromSnapshotMetadata = _reflection.GeneratedProtocolMessageType( - "CreateTableFromSnapshotMetadata", - (_message.Message,), - { - "DESCRIPTOR": _CREATETABLEFROMSNAPSHOTMETADATA, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """The metadata for the Operation returned by CreateTableFromSnapshot. - Note: This is a private alpha release of Cloud Bigtable snapshots. - This feature is not currently available to most Cloud Bigtable - customers. This feature might be changed in backward-incompatible ways - and is not recommended for production use. It is not subject to any - SLA or deprecation policy. - - Attributes: - original_request: - The request that prompted the initiation of this - CreateTableFromSnapshot operation. - request_time: - The time at which the original request was received. - finish_time: - The time at which the operation failed or was completed - successfully. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableFromSnapshotMetadata) - }, -) -_sym_db.RegisterMessage(CreateTableFromSnapshotMetadata) - -CreateBackupRequest = _reflection.GeneratedProtocolMessageType( - "CreateBackupRequest", - (_message.Message,), - { - "DESCRIPTOR": _CREATEBACKUPREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """The request for [CreateBackup][google.bigtable.admin.v2.BigtableTableA - dmin.CreateBackup]. - - Attributes: - parent: - Required. This must be one of the clusters in the instance in - which this table is located. The backup will be stored in this - cluster. 
Values are of the form ``projects/{project}/instances - /{instance}/clusters/{cluster}``. - backup_id: - Required. The id of the backup to be created. The - ``backup_id`` along with the parent ``parent`` are combined as - {parent}/backups/{backup_id} to create the full backup name, - of the form: ``projects/{project}/instances/{instance}/cluster - s/{cluster}/backups/{backup_id}``. This string must be between - 1 and 50 characters in length and match the regex [_a- - zA-Z0-9][-_.a-zA-Z0-9]*. - backup: - Required. The backup to create. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateBackupRequest) - }, -) -_sym_db.RegisterMessage(CreateBackupRequest) - -CreateBackupMetadata = _reflection.GeneratedProtocolMessageType( - "CreateBackupMetadata", - (_message.Message,), - { - "DESCRIPTOR": _CREATEBACKUPMETADATA, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Metadata type for the operation returned by [CreateBackup][google.bigt - able.admin.v2.BigtableTableAdmin.CreateBackup]. - - Attributes: - name: - The name of the backup being created. - source_table: - The name of the table the backup is created from. - start_time: - The time at which this operation started. - end_time: - If set, the time at which this operation finished or was - cancelled. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateBackupMetadata) - }, -) -_sym_db.RegisterMessage(CreateBackupMetadata) - -UpdateBackupRequest = _reflection.GeneratedProtocolMessageType( - "UpdateBackupRequest", - (_message.Message,), - { - "DESCRIPTOR": _UPDATEBACKUPREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """The request for [UpdateBackup][google.bigtable.admin.v2.BigtableTableA - dmin.UpdateBackup]. - - Attributes: - backup: - Required. The backup to update. ``backup.name``, and the - fields to be updated as specified by ``update_mask`` are - required. 
Other fields are ignored. Update is only supported - for the following fields: \* ``backup.expire_time``. - update_mask: - Required. A mask specifying which fields (e.g. - ``expire_time``) in the Backup resource should be updated. - This mask is relative to the Backup resource, not to the - request message. The field mask must always be specified; this - prevents any future fields from being erased accidentally by - clients that do not know about them. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.UpdateBackupRequest) - }, -) -_sym_db.RegisterMessage(UpdateBackupRequest) - -GetBackupRequest = _reflection.GeneratedProtocolMessageType( - "GetBackupRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETBACKUPREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """The request for - [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. - - Attributes: - name: - Required. Name of the backup. Values are of the form ``project - s/{project}/instances/{instance}/clusters/{cluster}/backups/{b - ackup}``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetBackupRequest) - }, -) -_sym_db.RegisterMessage(GetBackupRequest) - -DeleteBackupRequest = _reflection.GeneratedProtocolMessageType( - "DeleteBackupRequest", - (_message.Message,), - { - "DESCRIPTOR": _DELETEBACKUPREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """The request for [DeleteBackup][google.bigtable.admin.v2.BigtableTableA - dmin.DeleteBackup]. - - Attributes: - name: - Required. Name of the backup to delete. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/b - ackups/{backup}``. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteBackupRequest) - }, -) -_sym_db.RegisterMessage(DeleteBackupRequest) - -ListBackupsRequest = _reflection.GeneratedProtocolMessageType( - "ListBackupsRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTBACKUPSREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """The request for [ListBackups][google.bigtable.admin.v2.BigtableTableAd - min.ListBackups]. - - Attributes: - parent: - Required. The cluster to list backups from. Values are of the - form ``projects/{project}/instances/{instance}/clusters/{clust - er}``. Use ``{cluster} = '-'`` to list backups for all - clusters in an instance, e.g., - ``projects/{project}/instances/{instance}/clusters/-``. - filter: - A filter expression that filters backups listed in the - response. The expression must specify the field name, a - comparison operator, and the value that you want to use for - filtering. The value must be a string, a number, or a boolean. - The comparison operator must be <, >, <=, >=, !=, =, or :. - Colon ‘:’ represents a HAS operator which is roughly - synonymous with equality. Filter rules are case insensitive. - The fields eligible for filtering are: \* ``name`` \* - ``source_table`` \* ``state`` \* ``start_time`` (and values - are of the format YYYY-MM-DDTHH:MM:SSZ) \* ``end_time`` (and - values are of the format YYYY-MM-DDTHH:MM:SSZ) \* - ``expire_time`` (and values are of the format YYYY-MM- - DDTHH:MM:SSZ) \* ``size_bytes`` To filter on multiple - expressions, provide each separate expression within - parentheses. By default, each expression is an AND expression. - However, you can include AND, OR, and NOT expressions - explicitly. Some examples of using filters are: - - ``name:"exact"`` –> The backup’s name is the string “exact”. - - ``name:howl`` –> The backup’s name contains the string “howl”. 
- - ``source_table:prod`` –> The source_table’s name contains - the string “prod”. - ``state:CREATING`` –> The backup is - pending creation. - ``state:READY`` –> The backup is fully - created and ready for use. - ``(name:howl) AND (start_time < - \"2018-03-28T14:50:00Z\")`` –> The backup name contains the - string “howl” and start_time of the backup is before - 2018-03-28T14:50:00Z. - ``size_bytes > 10000000000`` –> The - backup’s size is greater than 10GB - order_by: - An expression for specifying the sort order of the results of - the request. The string value should specify one or more - fields in [Backup][google.bigtable.admin.v2.Backup]. The full - syntax is described at https://aip.dev/132#ordering. Fields - supported are: \* name \* source_table \* expire_time \* - start_time \* end_time \* size_bytes \* state For example, - “start_time”. The default sorting order is ascending. To - specify descending order for the field, a suffix " desc" - should be appended to the field name. For example, “start_time - desc”. Redundant space characters in the syntax are - insigificant. If order_by is empty, results will be sorted by - ``start_time`` in descending order starting from the most - recently created backup. - page_size: - Number of backups to be returned in the response. If 0 or - less, defaults to the server’s maximum allowed page size. - page_token: - If non-empty, ``page_token`` should contain a [next_page_token - ][google.bigtable.admin.v2.ListBackupsResponse.next_page_token - ] from a previous [ListBackupsResponse][google.bigtable.admin. - v2.ListBackupsResponse] to the same ``parent`` and with the - same ``filter``. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListBackupsRequest) - }, -) -_sym_db.RegisterMessage(ListBackupsRequest) - -ListBackupsResponse = _reflection.GeneratedProtocolMessageType( - "ListBackupsResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTBACKUPSRESPONSE, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """The response for [ListBackups][google.bigtable.admin.v2.BigtableTableA - dmin.ListBackups]. - - Attributes: - backups: - The list of matching backups. - next_page_token: - \ ``next_page_token`` can be sent in a subsequent [ListBackups - ][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups] - call to fetch more of the matching backups. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListBackupsResponse) - }, -) -_sym_db.RegisterMessage(ListBackupsResponse) - - -DESCRIPTOR._options = None -_RESTORETABLEREQUEST.fields_by_name["parent"]._options = None -_RESTORETABLEREQUEST.fields_by_name["table_id"]._options = None -_RESTORETABLEREQUEST.fields_by_name["backup"]._options = None -_CREATETABLEREQUEST.fields_by_name["parent"]._options = None -_CREATETABLEREQUEST.fields_by_name["table_id"]._options = None -_CREATETABLEREQUEST.fields_by_name["table"]._options = None -_CREATETABLEFROMSNAPSHOTREQUEST.fields_by_name["parent"]._options = None -_CREATETABLEFROMSNAPSHOTREQUEST.fields_by_name["table_id"]._options = None -_CREATETABLEFROMSNAPSHOTREQUEST.fields_by_name["source_snapshot"]._options = None -_DROPROWRANGEREQUEST.fields_by_name["name"]._options = None -_LISTTABLESREQUEST.fields_by_name["parent"]._options = None -_GETTABLEREQUEST.fields_by_name["name"]._options = None -_DELETETABLEREQUEST.fields_by_name["name"]._options = None -_MODIFYCOLUMNFAMILIESREQUEST.fields_by_name["name"]._options = None -_MODIFYCOLUMNFAMILIESREQUEST.fields_by_name["modifications"]._options = None -_GENERATECONSISTENCYTOKENREQUEST.fields_by_name["name"]._options = None 
-_CHECKCONSISTENCYREQUEST.fields_by_name["name"]._options = None -_CHECKCONSISTENCYREQUEST.fields_by_name["consistency_token"]._options = None -_SNAPSHOTTABLEREQUEST.fields_by_name["name"]._options = None -_SNAPSHOTTABLEREQUEST.fields_by_name["cluster"]._options = None -_SNAPSHOTTABLEREQUEST.fields_by_name["snapshot_id"]._options = None -_GETSNAPSHOTREQUEST.fields_by_name["name"]._options = None -_LISTSNAPSHOTSREQUEST.fields_by_name["parent"]._options = None -_DELETESNAPSHOTREQUEST.fields_by_name["name"]._options = None -_CREATEBACKUPREQUEST.fields_by_name["parent"]._options = None -_CREATEBACKUPREQUEST.fields_by_name["backup_id"]._options = None -_CREATEBACKUPREQUEST.fields_by_name["backup"]._options = None -_UPDATEBACKUPREQUEST.fields_by_name["backup"]._options = None -_UPDATEBACKUPREQUEST.fields_by_name["update_mask"]._options = None -_GETBACKUPREQUEST.fields_by_name["name"]._options = None -_DELETEBACKUPREQUEST.fields_by_name["name"]._options = None -_LISTBACKUPSREQUEST.fields_by_name["parent"]._options = None - -_BIGTABLETABLEADMIN = _descriptor.ServiceDescriptor( - name="BigtableTableAdmin", - full_name="google.bigtable.admin.v2.BigtableTableAdmin", - file=DESCRIPTOR, - index=0, - serialized_options=b"\312A\034bigtableadmin.googleapis.com\322A\273\002https://www.googleapis.com/auth/bigtable.admin,https://www.googleapis.com/auth/bigtable.admin.table,https://www.googleapis.com/auth/cloud-bigtable.admin,https://www.googleapis.com/auth/cloud-bigtable.admin.table,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-only", - create_key=_descriptor._internal_create_key, - serialized_start=4689, - serialized_end=9449, - methods=[ - _descriptor.MethodDescriptor( - name="CreateTable", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.CreateTable", - index=0, - containing_service=None, - input_type=_CREATETABLEREQUEST, - output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._TABLE, - 
serialized_options=b'\202\323\344\223\002/"*/v2/{parent=projects/*/instances/*}/tables:\001*\332A\025parent,table_id,table', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="CreateTableFromSnapshot", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot", - index=1, - containing_service=None, - input_type=_CREATETABLEFROMSNAPSHOTREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b'\202\323\344\223\002B"=/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot:\001*\332A\037parent,table_id,source_snapshot\312A(\n\005Table\022\037CreateTableFromSnapshotMetadata', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="ListTables", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.ListTables", - index=2, - containing_service=None, - input_type=_LISTTABLESREQUEST, - output_type=_LISTTABLESRESPONSE, - serialized_options=b"\202\323\344\223\002,\022*/v2/{parent=projects/*/instances/*}/tables\332A\006parent", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GetTable", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.GetTable", - index=3, - containing_service=None, - input_type=_GETTABLEREQUEST, - output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._TABLE, - serialized_options=b"\202\323\344\223\002,\022*/v2/{name=projects/*/instances/*/tables/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="DeleteTable", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable", - index=4, - containing_service=None, - input_type=_DELETETABLEREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=b"\202\323\344\223\002,**/v2/{name=projects/*/instances/*/tables/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - 
_descriptor.MethodDescriptor( - name="ModifyColumnFamilies", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies", - index=5, - containing_service=None, - input_type=_MODIFYCOLUMNFAMILIESREQUEST, - output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._TABLE, - serialized_options=b'\202\323\344\223\002D"?/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies:\001*\332A\022name,modifications', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="DropRowRange", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange", - index=6, - containing_service=None, - input_type=_DROPROWRANGEREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=b'\202\323\344\223\002<"7/v2/{name=projects/*/instances/*/tables/*}:dropRowRange:\001*', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GenerateConsistencyToken", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken", - index=7, - containing_service=None, - input_type=_GENERATECONSISTENCYTOKENREQUEST, - output_type=_GENERATECONSISTENCYTOKENRESPONSE, - serialized_options=b'\202\323\344\223\002H"C/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken:\001*\332A\004name', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="CheckConsistency", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency", - index=8, - containing_service=None, - input_type=_CHECKCONSISTENCYREQUEST, - output_type=_CHECKCONSISTENCYRESPONSE, - serialized_options=b'\202\323\344\223\002@";/v2/{name=projects/*/instances/*/tables/*}:checkConsistency:\001*\332A\026name,consistency_token', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="SnapshotTable", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable", - index=9, - 
containing_service=None, - input_type=_SNAPSHOTTABLEREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b'\202\323\344\223\0028"3/v2/{name=projects/*/instances/*/tables/*}:snapshot:\001*\332A$name,cluster,snapshot_id,description\312A!\n\010Snapshot\022\025SnapshotTableMetadata', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GetSnapshot", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot", - index=10, - containing_service=None, - input_type=_GETSNAPSHOTREQUEST, - output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._SNAPSHOT, - serialized_options=b"\202\323\344\223\002:\0228/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="ListSnapshots", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots", - index=11, - containing_service=None, - input_type=_LISTSNAPSHOTSREQUEST, - output_type=_LISTSNAPSHOTSRESPONSE, - serialized_options=b"\202\323\344\223\002:\0228/v2/{parent=projects/*/instances/*/clusters/*}/snapshots\332A\006parent", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="DeleteSnapshot", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot", - index=12, - containing_service=None, - input_type=_DELETESNAPSHOTREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=b"\202\323\344\223\002:*8/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="CreateBackup", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup", - index=13, - containing_service=None, - input_type=_CREATEBACKUPREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - 
serialized_options=b'\202\323\344\223\002@"6/v2/{parent=projects/*/instances/*/clusters/*}/backups:\006backup\332A\027parent,backup_id,backup\312A\036\n\006Backup\022\024CreateBackupMetadata', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GetBackup", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.GetBackup", - index=14, - containing_service=None, - input_type=_GETBACKUPREQUEST, - output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._BACKUP, - serialized_options=b"\202\323\344\223\0028\0226/v2/{name=projects/*/instances/*/clusters/*/backups/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="UpdateBackup", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup", - index=15, - containing_service=None, - input_type=_UPDATEBACKUPREQUEST, - output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._BACKUP, - serialized_options=b"\202\323\344\223\002G2=/v2/{backup.name=projects/*/instances/*/clusters/*/backups/*}:\006backup\332A\022backup,update_mask", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="DeleteBackup", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup", - index=16, - containing_service=None, - input_type=_DELETEBACKUPREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=b"\202\323\344\223\0028*6/v2/{name=projects/*/instances/*/clusters/*/backups/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="ListBackups", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.ListBackups", - index=17, - containing_service=None, - input_type=_LISTBACKUPSREQUEST, - output_type=_LISTBACKUPSRESPONSE, - serialized_options=b"\202\323\344\223\0028\0226/v2/{parent=projects/*/instances/*/clusters/*}/backups\332A\006parent", - 
create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="RestoreTable", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable", - index=18, - containing_service=None, - input_type=_RESTORETABLEREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b'\202\323\344\223\0027"2/v2/{parent=projects/*/instances/*}/tables:restore:\001*\312A\035\n\005Table\022\024RestoreTableMetadata', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GetIamPolicy", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.GetIamPolicy", - index=19, - containing_service=None, - input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._GETIAMPOLICYREQUEST, - output_type=google_dot_iam_dot_v1_dot_policy__pb2._POLICY, - serialized_options=b'\202\323\344\223\002\216\001";/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy:\001*ZL"G/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:getIamPolicy:\001*\332A\010resource', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="SetIamPolicy", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.SetIamPolicy", - index=20, - containing_service=None, - input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._SETIAMPOLICYREQUEST, - output_type=google_dot_iam_dot_v1_dot_policy__pb2._POLICY, - serialized_options=b'\202\323\344\223\002\216\001";/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy:\001*ZL"G/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:setIamPolicy:\001*\332A\017resource,policy', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="TestIamPermissions", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.TestIamPermissions", - index=21, - containing_service=None, - input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._TESTIAMPERMISSIONSREQUEST, - 
output_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._TESTIAMPERMISSIONSRESPONSE, - serialized_options=b'\202\323\344\223\002\232\001"A/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions:\001*ZR"M/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:testIamPermissions:\001*\332A\024resource,permissions', - create_key=_descriptor._internal_create_key, - ), - ], -) -_sym_db.RegisterServiceDescriptor(_BIGTABLETABLEADMIN) - -DESCRIPTOR.services_by_name["BigtableTableAdmin"] = _BIGTABLETABLEADMIN - -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py b/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py deleted file mode 100644 index b9478a4d1..000000000 --- a/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py +++ /dev/null @@ -1,1134 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc - -from google.cloud.bigtable_admin_v2.proto import ( - bigtable_table_admin_pb2 as google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2, -) -from google.cloud.bigtable_admin_v2.proto import ( - table_pb2 as google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2, -) -from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2 -from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2 -from google.longrunning import ( - operations_pb2 as google_dot_longrunning_dot_operations__pb2, -) -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 - - -class BigtableTableAdminStub(object): - """Service for creating, configuring, and deleting Cloud Bigtable tables. - - - Provides access to the table schemas only, not the data stored within - the tables. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. 
- """ - self.CreateTable = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.FromString, - ) - self.CreateTableFromSnapshot = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTableFromSnapshot", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableFromSnapshotRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.ListTables = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/ListTables", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesResponse.FromString, - ) - self.GetTable = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/GetTable", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetTableRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.FromString, - ) - self.DeleteTable = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteTableRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - self.ModifyColumnFamilies = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies", - 
request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ModifyColumnFamiliesRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.FromString, - ) - self.DropRowRange = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DropRowRangeRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - self.GenerateConsistencyToken = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/GenerateConsistencyToken", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenResponse.FromString, - ) - self.CheckConsistency = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/CheckConsistency", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyResponse.FromString, - ) - self.SnapshotTable = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/SnapshotTable", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.SnapshotTableRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.GetSnapshot = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/GetSnapshot", - 
request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetSnapshotRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Snapshot.FromString, - ) - self.ListSnapshots = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/ListSnapshots", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsResponse.FromString, - ) - self.DeleteSnapshot = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSnapshot", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteSnapshotRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - self.CreateBackup = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/CreateBackup", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateBackupRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.GetBackup = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/GetBackup", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetBackupRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Backup.FromString, - ) - self.UpdateBackup = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateBackup", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.UpdateBackupRequest.SerializeToString, - 
response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Backup.FromString, - ) - self.DeleteBackup = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteBackup", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteBackupRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - self.ListBackups = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/ListBackups", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListBackupsRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListBackupsResponse.FromString, - ) - self.RestoreTable = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/RestoreTable", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.RestoreTableRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.GetIamPolicy = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/GetIamPolicy", - request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.SerializeToString, - response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, - ) - self.SetIamPolicy = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/SetIamPolicy", - request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.SerializeToString, - response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, - ) - self.TestIamPermissions = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/TestIamPermissions", - request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.SerializeToString, - 
response_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString, - ) - - -class BigtableTableAdminServicer(object): - """Service for creating, configuring, and deleting Cloud Bigtable tables. - - - Provides access to the table schemas only, not the data stored within - the tables. - """ - - def CreateTable(self, request, context): - """Creates a new table in the specified instance. - The table can be created with a full set of initial column families, - specified in the request. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def CreateTableFromSnapshot(self, request, context): - """Creates a new table from the specified snapshot. The target table must - not exist. The snapshot and the table must be in the same instance. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListTables(self, request, context): - """Lists all tables served from a specified instance. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetTable(self, request, context): - """Gets metadata information about the specified table. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteTable(self, request, context): - """Permanently deletes a specified table and all of its data. 
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ModifyColumnFamilies(self, request, context): - """Performs a series of column family modifications on the specified table. - Either all or none of the modifications will occur before this method - returns, but data requests received prior to that point may see a table - where only some modifications have taken effect. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DropRowRange(self, request, context): - """Permanently drop/delete a row range from a specified table. The request can - specify whether to delete all rows in a table, or only those that match a - particular prefix. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GenerateConsistencyToken(self, request, context): - """Generates a consistency token for a Table, which can be used in - CheckConsistency to check whether mutations to the table that finished - before this call started have been replicated. The tokens will be available - for 90 days. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def CheckConsistency(self, request, context): - """Checks replication consistency based on a consistency token, that is, if - replication has caught up based on the conditions specified in the token - and the check request. 
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def SnapshotTable(self, request, context): - """Creates a new snapshot in the specified cluster from the specified - source table. The cluster and the table must be in the same instance. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetSnapshot(self, request, context): - """Gets metadata information about the specified snapshot. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListSnapshots(self, request, context): - """Lists all snapshots associated with the specified cluster. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. 
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteSnapshot(self, request, context): - """Permanently deletes the specified snapshot. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def CreateBackup(self, request, context): - """Starts creating a new Cloud Bigtable Backup. The returned backup - [long-running operation][google.longrunning.Operation] can be used to - track creation of the backup. The - [metadata][google.longrunning.Operation.metadata] field type is - [CreateBackupMetadata][google.bigtable.admin.v2.CreateBackupMetadata]. The - [response][google.longrunning.Operation.response] field type is - [Backup][google.bigtable.admin.v2.Backup], if successful. Cancelling the returned operation will stop the - creation and delete the backup. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetBackup(self, request, context): - """Gets metadata on a pending or completed Cloud Bigtable Backup. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def UpdateBackup(self, request, context): - """Updates a pending or completed Cloud Bigtable Backup. 
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteBackup(self, request, context): - """Deletes a pending or completed Cloud Bigtable backup. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListBackups(self, request, context): - """Lists Cloud Bigtable backups. Returns both completed and pending - backups. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def RestoreTable(self, request, context): - """Create a new table by restoring from a completed backup. The new table - must be in the same instance as the instance containing the backup. The - returned table [long-running operation][google.longrunning.Operation] can - be used to track the progress of the operation, and to cancel it. The - [metadata][google.longrunning.Operation.metadata] field type is - [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. The - [response][google.longrunning.Operation.response] type is - [Table][google.bigtable.admin.v2.Table], if successful. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetIamPolicy(self, request, context): - """Gets the access control policy for a Table or Backup resource. - Returns an empty policy if the resource exists but does not have a policy - set. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def SetIamPolicy(self, request, context): - """Sets the access control policy on a Table or Backup resource. - Replaces any existing policy. 
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def TestIamPermissions(self, request, context): - """Returns permissions that the caller has on the specified Table or Backup resource. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - -def add_BigtableTableAdminServicer_to_server(servicer, server): - rpc_method_handlers = { - "CreateTable": grpc.unary_unary_rpc_method_handler( - servicer.CreateTable, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.SerializeToString, - ), - "CreateTableFromSnapshot": grpc.unary_unary_rpc_method_handler( - servicer.CreateTableFromSnapshot, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableFromSnapshotRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "ListTables": grpc.unary_unary_rpc_method_handler( - servicer.ListTables, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesResponse.SerializeToString, - ), - "GetTable": grpc.unary_unary_rpc_method_handler( - servicer.GetTable, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetTableRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.SerializeToString, - ), - "DeleteTable": grpc.unary_unary_rpc_method_handler( - servicer.DeleteTable, 
- request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteTableRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - "ModifyColumnFamilies": grpc.unary_unary_rpc_method_handler( - servicer.ModifyColumnFamilies, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ModifyColumnFamiliesRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.SerializeToString, - ), - "DropRowRange": grpc.unary_unary_rpc_method_handler( - servicer.DropRowRange, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DropRowRangeRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - "GenerateConsistencyToken": grpc.unary_unary_rpc_method_handler( - servicer.GenerateConsistencyToken, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenResponse.SerializeToString, - ), - "CheckConsistency": grpc.unary_unary_rpc_method_handler( - servicer.CheckConsistency, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyResponse.SerializeToString, - ), - "SnapshotTable": grpc.unary_unary_rpc_method_handler( - servicer.SnapshotTable, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.SnapshotTableRequest.FromString, - 
response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "GetSnapshot": grpc.unary_unary_rpc_method_handler( - servicer.GetSnapshot, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetSnapshotRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Snapshot.SerializeToString, - ), - "ListSnapshots": grpc.unary_unary_rpc_method_handler( - servicer.ListSnapshots, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsResponse.SerializeToString, - ), - "DeleteSnapshot": grpc.unary_unary_rpc_method_handler( - servicer.DeleteSnapshot, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteSnapshotRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - "CreateBackup": grpc.unary_unary_rpc_method_handler( - servicer.CreateBackup, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateBackupRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "GetBackup": grpc.unary_unary_rpc_method_handler( - servicer.GetBackup, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetBackupRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Backup.SerializeToString, - ), - "UpdateBackup": grpc.unary_unary_rpc_method_handler( - servicer.UpdateBackup, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.UpdateBackupRequest.FromString, - 
response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Backup.SerializeToString, - ), - "DeleteBackup": grpc.unary_unary_rpc_method_handler( - servicer.DeleteBackup, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteBackupRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - "ListBackups": grpc.unary_unary_rpc_method_handler( - servicer.ListBackups, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListBackupsRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListBackupsResponse.SerializeToString, - ), - "RestoreTable": grpc.unary_unary_rpc_method_handler( - servicer.RestoreTable, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.RestoreTableRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "GetIamPolicy": grpc.unary_unary_rpc_method_handler( - servicer.GetIamPolicy, - request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.FromString, - response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString, - ), - "SetIamPolicy": grpc.unary_unary_rpc_method_handler( - servicer.SetIamPolicy, - request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.FromString, - response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString, - ), - "TestIamPermissions": grpc.unary_unary_rpc_method_handler( - servicer.TestIamPermissions, - request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.FromString, - response_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.SerializeToString, - ), - } - generic_handler = 
grpc.method_handlers_generic_handler( - "google.bigtable.admin.v2.BigtableTableAdmin", rpc_method_handlers - ) - server.add_generic_rpc_handlers((generic_handler,)) - - -# This class is part of an EXPERIMENTAL API. -class BigtableTableAdmin(object): - """Service for creating, configuring, and deleting Cloud Bigtable tables. - - - Provides access to the table schemas only, not the data stored within - the tables. - """ - - @staticmethod - def CreateTable( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def CreateTableFromSnapshot( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTableFromSnapshot", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableFromSnapshotRequest.SerializeToString, - google_dot_longrunning_dot_operations__pb2.Operation.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def ListTables( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - 
timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/ListTables", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def GetTable( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/GetTable", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetTableRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def DeleteTable( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteTableRequest.SerializeToString, - google_dot_protobuf_dot_empty__pb2.Empty.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def ModifyColumnFamilies( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - 
compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ModifyColumnFamiliesRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def DropRowRange( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DropRowRangeRequest.SerializeToString, - google_dot_protobuf_dot_empty__pb2.Empty.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def GenerateConsistencyToken( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/GenerateConsistencyToken", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def 
CheckConsistency( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/CheckConsistency", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def SnapshotTable( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/SnapshotTable", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.SnapshotTableRequest.SerializeToString, - google_dot_longrunning_dot_operations__pb2.Operation.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def GetSnapshot( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/GetSnapshot", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetSnapshotRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Snapshot.FromString, - options, - channel_credentials, - insecure, - 
call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def ListSnapshots( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/ListSnapshots", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def DeleteSnapshot( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSnapshot", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteSnapshotRequest.SerializeToString, - google_dot_protobuf_dot_empty__pb2.Empty.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def CreateBackup( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/CreateBackup", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateBackupRequest.SerializeToString, - google_dot_longrunning_dot_operations__pb2.Operation.FromString, 
- options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def GetBackup( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/GetBackup", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetBackupRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Backup.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def UpdateBackup( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateBackup", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.UpdateBackupRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Backup.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def DeleteBackup( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteBackup", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteBackupRequest.SerializeToString, - 
google_dot_protobuf_dot_empty__pb2.Empty.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def ListBackups( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/ListBackups", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListBackupsRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListBackupsResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def RestoreTable( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/RestoreTable", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.RestoreTableRequest.SerializeToString, - google_dot_longrunning_dot_operations__pb2.Operation.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def GetIamPolicy( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/GetIamPolicy", - google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.SerializeToString, - 
google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def SetIamPolicy( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/SetIamPolicy", - google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.SerializeToString, - google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def TestIamPermissions( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/TestIamPermissions", - google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.SerializeToString, - google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) diff --git a/google/cloud/bigtable_admin_v2/proto/common_pb2.py b/google/cloud/bigtable_admin_v2/proto/common_pb2.py deleted file mode 100644 index 09233cff5..000000000 --- a/google/cloud/bigtable_admin_v2/proto/common_pb2.py +++ /dev/null @@ -1,188 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: google/cloud/bigtable_admin_v2/proto/common.proto -"""Generated protocol buffer code.""" -from google.protobuf.internal import enum_type_wrapper -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/bigtable_admin_v2/proto/common.proto", - package="google.bigtable.admin.v2", - syntax="proto3", - serialized_options=b'\n\034com.google.bigtable.admin.v2B\013CommonProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2\352\002"Google::Cloud::Bigtable::Admin::V2', - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n1google/cloud/bigtable_admin_v2/proto/common.proto\x12\x18google.bigtable.admin.v2\x1a\x1fgoogle/protobuf/timestamp.proto"\x8b\x01\n\x11OperationProgress\x12\x18\n\x10progress_percent\x18\x01 \x01(\x05\x12.\n\nstart_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp*=\n\x0bStorageType\x12\x1c\n\x18STORAGE_TYPE_UNSPECIFIED\x10\x00\x12\x07\n\x03SSD\x10\x01\x12\x07\n\x03HDD\x10\x02\x42\xd3\x01\n\x1c\x63om.google.bigtable.admin.v2B\x0b\x43ommonProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2\xea\x02"Google::Cloud::Bigtable::Admin::V2b\x06proto3', - dependencies=[google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,], -) - -_STORAGETYPE = _descriptor.EnumDescriptor( - name="StorageType", - full_name="google.bigtable.admin.v2.StorageType", - 
filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="STORAGE_TYPE_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="SSD", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="HDD", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=254, - serialized_end=315, -) -_sym_db.RegisterEnumDescriptor(_STORAGETYPE) - -StorageType = enum_type_wrapper.EnumTypeWrapper(_STORAGETYPE) -STORAGE_TYPE_UNSPECIFIED = 0 -SSD = 1 -HDD = 2 - - -_OPERATIONPROGRESS = _descriptor.Descriptor( - name="OperationProgress", - full_name="google.bigtable.admin.v2.OperationProgress", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="progress_percent", - full_name="google.bigtable.admin.v2.OperationProgress.progress_percent", - index=0, - number=1, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="start_time", - full_name="google.bigtable.admin.v2.OperationProgress.start_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, 
- ), - _descriptor.FieldDescriptor( - name="end_time", - full_name="google.bigtable.admin.v2.OperationProgress.end_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=113, - serialized_end=252, -) - -_OPERATIONPROGRESS.fields_by_name[ - "start_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_OPERATIONPROGRESS.fields_by_name[ - "end_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -DESCRIPTOR.message_types_by_name["OperationProgress"] = _OPERATIONPROGRESS -DESCRIPTOR.enum_types_by_name["StorageType"] = _STORAGETYPE -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -OperationProgress = _reflection.GeneratedProtocolMessageType( - "OperationProgress", - (_message.Message,), - { - "DESCRIPTOR": _OPERATIONPROGRESS, - "__module__": "google.cloud.bigtable_admin_v2.proto.common_pb2", - "__doc__": """Encapsulates progress related information for a Cloud Bigtable long - running operation. - - Attributes: - progress_percent: - Percent completion of the operation. Values are between 0 and - 100 inclusive. - start_time: - Time the request was received. - end_time: - If set, the time at which this operation failed or was - completed successfully. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.OperationProgress) - }, -) -_sym_db.RegisterMessage(OperationProgress) - - -DESCRIPTOR._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/bigtable_admin_v2/proto/common_pb2_grpc.py b/google/cloud/bigtable_admin_v2/proto/common_pb2_grpc.py deleted file mode 100644 index 8a9393943..000000000 --- a/google/cloud/bigtable_admin_v2/proto/common_pb2_grpc.py +++ /dev/null @@ -1,3 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc diff --git a/google/cloud/bigtable_admin_v2/proto/instance_pb2.py b/google/cloud/bigtable_admin_v2/proto/instance_pb2.py deleted file mode 100644 index e0138e0fb..000000000 --- a/google/cloud/bigtable_admin_v2/proto/instance_pb2.py +++ /dev/null @@ -1,886 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/bigtable_admin_v2/proto/instance.proto -"""Generated protocol buffer code.""" -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.cloud.bigtable_admin_v2.proto import ( - common_pb2 as google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_common__pb2, -) - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/bigtable_admin_v2/proto/instance.proto", - package="google.bigtable.admin.v2", - syntax="proto3", - 
serialized_options=b'\n\034com.google.bigtable.admin.v2B\rInstanceProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2\352\002"Google::Cloud::Bigtable::Admin::V2', - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n3google/cloud/bigtable_admin_v2/proto/instance.proto\x12\x18google.bigtable.admin.v2\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x31google/cloud/bigtable_admin_v2/proto/common.proto"\xdd\x03\n\x08Instance\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x19\n\x0c\x64isplay_name\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x37\n\x05state\x18\x03 \x01(\x0e\x32(.google.bigtable.admin.v2.Instance.State\x12\x35\n\x04type\x18\x04 \x01(\x0e\x32\'.google.bigtable.admin.v2.Instance.Type\x12>\n\x06labels\x18\x05 \x03(\x0b\x32..google.bigtable.admin.v2.Instance.LabelsEntry\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"5\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02"=\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x0e\n\nPRODUCTION\x10\x01\x12\x0f\n\x0b\x44\x45VELOPMENT\x10\x02:N\xea\x41K\n bigtable.googleapis.com/Instance\x12\'projects/{project}/instances/{instance}"\xa7\x03\n\x07\x43luster\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x38\n\x08location\x18\x02 \x01(\tB&\xfa\x41#\n!locations.googleapis.com/Location\x12;\n\x05state\x18\x03 \x01(\x0e\x32\'.google.bigtable.admin.v2.Cluster.StateB\x03\xe0\x41\x03\x12\x18\n\x0bserve_nodes\x18\x04 \x01(\x05\x42\x03\xe0\x41\x02\x12\x43\n\x14\x64\x65\x66\x61ult_storage_type\x18\x05 
\x01(\x0e\x32%.google.bigtable.admin.v2.StorageType"Q\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02\x12\x0c\n\x08RESIZING\x10\x03\x12\x0c\n\x08\x44ISABLED\x10\x04:`\xea\x41]\n\x1f\x62igtable.googleapis.com/Cluster\x12:projects/{project}/instances/{instance}/clusters/{cluster}"\xee\x03\n\nAppProfile\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04\x65tag\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\x12g\n\x1dmulti_cluster_routing_use_any\x18\x05 \x01(\x0b\x32>.google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAnyH\x00\x12[\n\x16single_cluster_routing\x18\x06 \x01(\x0b\x32\x39.google.bigtable.admin.v2.AppProfile.SingleClusterRoutingH\x00\x1a\x1b\n\x19MultiClusterRoutingUseAny\x1aN\n\x14SingleClusterRouting\x12\x12\n\ncluster_id\x18\x01 \x01(\t\x12"\n\x1a\x61llow_transactional_writes\x18\x02 \x01(\x08:j\xea\x41g\n"bigtable.googleapis.com/AppProfile\x12\x41projects/{project}/instances/{instance}/appProfiles/{app_profile}B\x10\n\x0erouting_policyB\xd5\x01\n\x1c\x63om.google.bigtable.admin.v2B\rInstanceProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2\xea\x02"Google::Cloud::Bigtable::Admin::V2b\x06proto3', - dependencies=[ - google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, - google_dot_api_dot_resource__pb2.DESCRIPTOR, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_common__pb2.DESCRIPTOR, - ], -) - - -_INSTANCE_STATE = _descriptor.EnumDescriptor( - name="State", - full_name="google.bigtable.admin.v2.Instance.State", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="STATE_NOT_KNOWN", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="READY", - index=1, - number=1, - 
serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="CREATING", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=474, - serialized_end=527, -) -_sym_db.RegisterEnumDescriptor(_INSTANCE_STATE) - -_INSTANCE_TYPE = _descriptor.EnumDescriptor( - name="Type", - full_name="google.bigtable.admin.v2.Instance.Type", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="TYPE_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="PRODUCTION", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="DEVELOPMENT", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=529, - serialized_end=590, -) -_sym_db.RegisterEnumDescriptor(_INSTANCE_TYPE) - -_CLUSTER_STATE = _descriptor.EnumDescriptor( - name="State", - full_name="google.bigtable.admin.v2.Cluster.State", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="STATE_NOT_KNOWN", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="READY", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="CREATING", - index=2, - number=2, - serialized_options=None, - type=None, - 
create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="RESIZING", - index=3, - number=3, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="DISABLED", - index=4, - number=4, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=917, - serialized_end=998, -) -_sym_db.RegisterEnumDescriptor(_CLUSTER_STATE) - - -_INSTANCE_LABELSENTRY = _descriptor.Descriptor( - name="LabelsEntry", - full_name="google.bigtable.admin.v2.Instance.LabelsEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.bigtable.admin.v2.Instance.LabelsEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.bigtable.admin.v2.Instance.LabelsEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=427, - serialized_end=472, -) - -_INSTANCE = _descriptor.Descriptor( - name="Instance", - full_name="google.bigtable.admin.v2.Instance", 
- filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.Instance.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="display_name", - full_name="google.bigtable.admin.v2.Instance.display_name", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="state", - full_name="google.bigtable.admin.v2.Instance.state", - index=2, - number=3, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="type", - full_name="google.bigtable.admin.v2.Instance.type", - index=3, - number=4, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="labels", - full_name="google.bigtable.admin.v2.Instance.labels", - index=4, - number=5, - type=11, - cpp_type=10, - label=3, - 
has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_INSTANCE_LABELSENTRY,], - enum_types=[_INSTANCE_STATE, _INSTANCE_TYPE,], - serialized_options=b"\352AK\n bigtable.googleapis.com/Instance\022'projects/{project}/instances/{instance}", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=193, - serialized_end=670, -) - - -_CLUSTER = _descriptor.Descriptor( - name="Cluster", - full_name="google.bigtable.admin.v2.Cluster", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.Cluster.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="location", - full_name="google.bigtable.admin.v2.Cluster.location", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\372A#\n!locations.googleapis.com/Location", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="state", - full_name="google.bigtable.admin.v2.Cluster.state", - index=2, - number=3, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - 
containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="serve_nodes", - full_name="google.bigtable.admin.v2.Cluster.serve_nodes", - index=3, - number=4, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="default_storage_type", - full_name="google.bigtable.admin.v2.Cluster.default_storage_type", - index=4, - number=5, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[_CLUSTER_STATE,], - serialized_options=b"\352A]\n\037bigtable.googleapis.com/Cluster\022:projects/{project}/instances/{instance}/clusters/{cluster}", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=673, - serialized_end=1096, -) - - -_APPPROFILE_MULTICLUSTERROUTINGUSEANY = _descriptor.Descriptor( - name="MultiClusterRoutingUseAny", - full_name="google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1360, - serialized_end=1387, -) - -_APPPROFILE_SINGLECLUSTERROUTING = _descriptor.Descriptor( - name="SingleClusterRouting", - 
full_name="google.bigtable.admin.v2.AppProfile.SingleClusterRouting", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="cluster_id", - full_name="google.bigtable.admin.v2.AppProfile.SingleClusterRouting.cluster_id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="allow_transactional_writes", - full_name="google.bigtable.admin.v2.AppProfile.SingleClusterRouting.allow_transactional_writes", - index=1, - number=2, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1389, - serialized_end=1467, -) - -_APPPROFILE = _descriptor.Descriptor( - name="AppProfile", - full_name="google.bigtable.admin.v2.AppProfile", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.AppProfile.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - 
create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="etag", - full_name="google.bigtable.admin.v2.AppProfile.etag", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="description", - full_name="google.bigtable.admin.v2.AppProfile.description", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="multi_cluster_routing_use_any", - full_name="google.bigtable.admin.v2.AppProfile.multi_cluster_routing_use_any", - index=3, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="single_cluster_routing", - full_name="google.bigtable.admin.v2.AppProfile.single_cluster_routing", - index=4, - number=6, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[ - _APPPROFILE_MULTICLUSTERROUTINGUSEANY, - _APPPROFILE_SINGLECLUSTERROUTING, - ], - enum_types=[], - 
serialized_options=b'\352Ag\n"bigtable.googleapis.com/AppProfile\022Aprojects/{project}/instances/{instance}/appProfiles/{app_profile}', - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="routing_policy", - full_name="google.bigtable.admin.v2.AppProfile.routing_policy", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=1099, - serialized_end=1593, -) - -_INSTANCE_LABELSENTRY.containing_type = _INSTANCE -_INSTANCE.fields_by_name["state"].enum_type = _INSTANCE_STATE -_INSTANCE.fields_by_name["type"].enum_type = _INSTANCE_TYPE -_INSTANCE.fields_by_name["labels"].message_type = _INSTANCE_LABELSENTRY -_INSTANCE_STATE.containing_type = _INSTANCE -_INSTANCE_TYPE.containing_type = _INSTANCE -_CLUSTER.fields_by_name["state"].enum_type = _CLUSTER_STATE -_CLUSTER.fields_by_name[ - "default_storage_type" -].enum_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_common__pb2._STORAGETYPE -) -_CLUSTER_STATE.containing_type = _CLUSTER -_APPPROFILE_MULTICLUSTERROUTINGUSEANY.containing_type = _APPPROFILE -_APPPROFILE_SINGLECLUSTERROUTING.containing_type = _APPPROFILE -_APPPROFILE.fields_by_name[ - "multi_cluster_routing_use_any" -].message_type = _APPPROFILE_MULTICLUSTERROUTINGUSEANY -_APPPROFILE.fields_by_name[ - "single_cluster_routing" -].message_type = _APPPROFILE_SINGLECLUSTERROUTING -_APPPROFILE.oneofs_by_name["routing_policy"].fields.append( - _APPPROFILE.fields_by_name["multi_cluster_routing_use_any"] -) -_APPPROFILE.fields_by_name[ - "multi_cluster_routing_use_any" -].containing_oneof = _APPPROFILE.oneofs_by_name["routing_policy"] -_APPPROFILE.oneofs_by_name["routing_policy"].fields.append( - _APPPROFILE.fields_by_name["single_cluster_routing"] -) -_APPPROFILE.fields_by_name[ - "single_cluster_routing" -].containing_oneof = _APPPROFILE.oneofs_by_name["routing_policy"] -DESCRIPTOR.message_types_by_name["Instance"] = 
_INSTANCE -DESCRIPTOR.message_types_by_name["Cluster"] = _CLUSTER -DESCRIPTOR.message_types_by_name["AppProfile"] = _APPPROFILE -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -Instance = _reflection.GeneratedProtocolMessageType( - "Instance", - (_message.Message,), - { - "LabelsEntry": _reflection.GeneratedProtocolMessageType( - "LabelsEntry", - (_message.Message,), - { - "DESCRIPTOR": _INSTANCE_LABELSENTRY, - "__module__": "google.cloud.bigtable_admin_v2.proto.instance_pb2" - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Instance.LabelsEntry) - }, - ), - "DESCRIPTOR": _INSTANCE, - "__module__": "google.cloud.bigtable_admin_v2.proto.instance_pb2", - "__doc__": """A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and - the resources that serve them. All tables in an instance are served - from all [Clusters][google.bigtable.admin.v2.Cluster] in the instance. - - Attributes: - name: - The unique name of the instance. Values are of the form - ``projects/{project}/instances/[a-z][a-z0-9\\-]+[a-z0-9]``. - display_name: - Required. The descriptive name for this instance as it appears - in UIs. Can be changed at any time, but should be kept - globally unique to avoid confusion. - state: - (\ ``OutputOnly``) The current state of the instance. - type: - The type of the instance. Defaults to ``PRODUCTION``. - labels: - Labels are a flexible and lightweight mechanism for organizing - cloud resources into groups that reflect a customer’s - organizational needs and deployment strategies. They can be - used to filter resources and aggregate metrics. - Label keys - must be between 1 and 63 characters long and must conform - to the regular expression: - ``[\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}``. - Label values - must be between 0 and 63 characters long and must conform - to the regular expression: ``[\p{Ll}\p{Lo}\p{N}_-]{0,63}``. - - No more than 64 labels can be associated with a given - resource. - Keys and values must both be under 128 bytes. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Instance) - }, -) -_sym_db.RegisterMessage(Instance) -_sym_db.RegisterMessage(Instance.LabelsEntry) - -Cluster = _reflection.GeneratedProtocolMessageType( - "Cluster", - (_message.Message,), - { - "DESCRIPTOR": _CLUSTER, - "__module__": "google.cloud.bigtable_admin_v2.proto.instance_pb2", - "__doc__": """A resizable group of nodes in a particular cloud location, capable of - serving all [Tables][google.bigtable.admin.v2.Table] in the parent - [Instance][google.bigtable.admin.v2.Instance]. - - Attributes: - name: - The unique name of the cluster. Values are of the form ``proje - cts/{project}/instances/{instance}/clusters/[a-z][-a-z0-9]*``. - location: - (\ ``CreationOnly``) The location where this cluster’s nodes - and storage reside. For best performance, clients should be - located as close as possible to this cluster. Currently only - zones are supported, so values should be of the form - ``projects/{project}/locations/{zone}``. - state: - The current state of the cluster. - serve_nodes: - Required. The number of nodes allocated to this cluster. More - nodes enable higher throughput and more consistent - performance. - default_storage_type: - (\ ``CreationOnly``) The type of storage used by this cluster - to serve its parent instance’s tables, unless explicitly - overridden. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Cluster) - }, -) -_sym_db.RegisterMessage(Cluster) - -AppProfile = _reflection.GeneratedProtocolMessageType( - "AppProfile", - (_message.Message,), - { - "MultiClusterRoutingUseAny": _reflection.GeneratedProtocolMessageType( - "MultiClusterRoutingUseAny", - (_message.Message,), - { - "DESCRIPTOR": _APPPROFILE_MULTICLUSTERROUTINGUSEANY, - "__module__": "google.cloud.bigtable_admin_v2.proto.instance_pb2", - "__doc__": """Read/write requests are routed to the nearest cluster in the instance, - and will fail over to the nearest cluster that is available in the - event of transient errors or delays. Clusters in a region are - considered equidistant. Choosing this option sacrifices read-your- - writes consistency to improve availability.""", - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny) - }, - ), - "SingleClusterRouting": _reflection.GeneratedProtocolMessageType( - "SingleClusterRouting", - (_message.Message,), - { - "DESCRIPTOR": _APPPROFILE_SINGLECLUSTERROUTING, - "__module__": "google.cloud.bigtable_admin_v2.proto.instance_pb2", - "__doc__": """Unconditionally routes all read/write requests to a specific cluster. - This option preserves read-your-writes consistency but does not - improve availability. - - Attributes: - cluster_id: - The cluster to which read/write requests should be routed. - allow_transactional_writes: - Whether or not ``CheckAndMutateRow`` and - ``ReadModifyWriteRow`` requests are allowed by this app - profile. It is unsafe to send these requests to the same - table/row/column in multiple clusters. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.AppProfile.SingleClusterRouting) - }, - ), - "DESCRIPTOR": _APPPROFILE, - "__module__": "google.cloud.bigtable_admin_v2.proto.instance_pb2", - "__doc__": """A configuration object describing how Cloud Bigtable should treat - traffic from a particular end user application. - - Attributes: - name: - (\ ``OutputOnly``) The unique name of the app profile. Values - are of the form - ``projects//instances//appProfiles/[_a- - zA-Z0-9][-_.a-zA-Z0-9]*``. - etag: - Strongly validated etag for optimistic concurrency control. - Preserve the value returned from ``GetAppProfile`` when - calling ``UpdateAppProfile`` to fail the request if there has - been a modification in the mean time. The ``update_mask`` of - the request need not include ``etag`` for this protection to - apply. See `Wikipedia - `__ and `RFC 7232 - `__ for more - details. - description: - Optional long form description of the use case for this - AppProfile. - routing_policy: - The routing policy for all read/write requests that use this - app profile. A value must be explicitly set. - multi_cluster_routing_use_any: - Use a multi-cluster routing policy. - single_cluster_routing: - Use a single-cluster routing policy. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.AppProfile) - }, -) -_sym_db.RegisterMessage(AppProfile) -_sym_db.RegisterMessage(AppProfile.MultiClusterRoutingUseAny) -_sym_db.RegisterMessage(AppProfile.SingleClusterRouting) - - -DESCRIPTOR._options = None -_INSTANCE_LABELSENTRY._options = None -_INSTANCE.fields_by_name["name"]._options = None -_INSTANCE.fields_by_name["display_name"]._options = None -_INSTANCE._options = None -_CLUSTER.fields_by_name["name"]._options = None -_CLUSTER.fields_by_name["location"]._options = None -_CLUSTER.fields_by_name["state"]._options = None -_CLUSTER.fields_by_name["serve_nodes"]._options = None -_CLUSTER._options = None -_APPPROFILE._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/bigtable_admin_v2/proto/instance_pb2_grpc.py b/google/cloud/bigtable_admin_v2/proto/instance_pb2_grpc.py deleted file mode 100644 index 8a9393943..000000000 --- a/google/cloud/bigtable_admin_v2/proto/instance_pb2_grpc.py +++ /dev/null @@ -1,3 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc diff --git a/google/cloud/bigtable_admin_v2/proto/table_pb2.py b/google/cloud/bigtable_admin_v2/proto/table_pb2.py deleted file mode 100644 index 67238a81e..000000000 --- a/google/cloud/bigtable_admin_v2/proto/table_pb2.py +++ /dev/null @@ -1,1682 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: google/cloud/bigtable_admin_v2/proto/table.proto -"""Generated protocol buffer code.""" -from google.protobuf.internal import enum_type_wrapper -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/bigtable_admin_v2/proto/table.proto", - package="google.bigtable.admin.v2", - syntax="proto3", - serialized_options=b'\n\034com.google.bigtable.admin.v2B\nTableProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2\352\002"Google::Cloud::Bigtable::Admin::V2', - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n0google/cloud/bigtable_admin_v2/proto/table.proto\x12\x18google.bigtable.admin.v2\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\x9b\x01\n\x0bRestoreInfo\x12@\n\x0bsource_type\x18\x01 \x01(\x0e\x32+.google.bigtable.admin.v2.RestoreSourceType\x12;\n\x0b\x62\x61\x63kup_info\x18\x02 \x01(\x0b\x32$.google.bigtable.admin.v2.BackupInfoH\x00\x42\r\n\x0bsource_info"\xfb\x07\n\x05Table\x12\x0c\n\x04name\x18\x01 \x01(\t\x12J\n\x0e\x63luster_states\x18\x02 \x03(\x0b\x32\x32.google.bigtable.admin.v2.Table.ClusterStatesEntry\x12L\n\x0f\x63olumn_families\x18\x03 
\x03(\x0b\x32\x33.google.bigtable.admin.v2.Table.ColumnFamiliesEntry\x12I\n\x0bgranularity\x18\x04 \x01(\x0e\x32\x34.google.bigtable.admin.v2.Table.TimestampGranularity\x12;\n\x0crestore_info\x18\x06 \x01(\x0b\x32%.google.bigtable.admin.v2.RestoreInfo\x1a\xf9\x01\n\x0c\x43lusterState\x12X\n\x11replication_state\x18\x01 \x01(\x0e\x32=.google.bigtable.admin.v2.Table.ClusterState.ReplicationState"\x8e\x01\n\x10ReplicationState\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\x10\n\x0cINITIALIZING\x10\x01\x12\x17\n\x13PLANNED_MAINTENANCE\x10\x02\x12\x19\n\x15UNPLANNED_MAINTENANCE\x10\x03\x12\t\n\x05READY\x10\x04\x12\x14\n\x10READY_OPTIMIZING\x10\x05\x1a\x62\n\x12\x43lusterStatesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12;\n\x05value\x18\x02 \x01(\x0b\x32,.google.bigtable.admin.v2.Table.ClusterState:\x02\x38\x01\x1a]\n\x13\x43olumnFamiliesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x35\n\x05value\x18\x02 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamily:\x02\x38\x01"I\n\x14TimestampGranularity\x12%\n!TIMESTAMP_GRANULARITY_UNSPECIFIED\x10\x00\x12\n\n\x06MILLIS\x10\x01"\\\n\x04View\x12\x14\n\x10VIEW_UNSPECIFIED\x10\x00\x12\r\n\tNAME_ONLY\x10\x01\x12\x0f\n\x0bSCHEMA_VIEW\x10\x02\x12\x14\n\x10REPLICATION_VIEW\x10\x03\x12\x08\n\x04\x46ULL\x10\x04:Z\xea\x41W\n\x1d\x62igtable.googleapis.com/Table\x12\x36projects/{project}/instances/{instance}/tables/{table}"A\n\x0c\x43olumnFamily\x12\x31\n\x07gc_rule\x18\x01 \x01(\x0b\x32 .google.bigtable.admin.v2.GcRule"\xd5\x02\n\x06GcRule\x12\x1a\n\x10max_num_versions\x18\x01 \x01(\x05H\x00\x12,\n\x07max_age\x18\x02 \x01(\x0b\x32\x19.google.protobuf.DurationH\x00\x12\x45\n\x0cintersection\x18\x03 \x01(\x0b\x32-.google.bigtable.admin.v2.GcRule.IntersectionH\x00\x12\x37\n\x05union\x18\x04 \x01(\x0b\x32&.google.bigtable.admin.v2.GcRule.UnionH\x00\x1a?\n\x0cIntersection\x12/\n\x05rules\x18\x01 \x03(\x0b\x32 .google.bigtable.admin.v2.GcRule\x1a\x38\n\x05Union\x12/\n\x05rules\x18\x01 \x03(\x0b\x32 
.google.bigtable.admin.v2.GcRuleB\x06\n\x04rule"\xc7\x03\n\x08Snapshot\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x35\n\x0csource_table\x18\x02 \x01(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12\x17\n\x0f\x64\x61ta_size_bytes\x18\x03 \x01(\x03\x12/\n\x0b\x63reate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x64\x65lete_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x37\n\x05state\x18\x06 \x01(\x0e\x32(.google.bigtable.admin.v2.Snapshot.State\x12\x13\n\x0b\x64\x65scription\x18\x07 \x01(\t"5\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02:v\xea\x41s\n bigtable.googleapis.com/Snapshot\x12Oprojects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}"\xd7\x03\n\x06\x42\x61\x63kup\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x1c\n\x0csource_table\x18\x02 \x01(\tB\x06\xe0\x41\x05\xe0\x41\x02\x12\x34\n\x0b\x65xpire_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x02\x12\x33\n\nstart_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x31\n\x08\x65nd_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x17\n\nsize_bytes\x18\x06 \x01(\x03\x42\x03\xe0\x41\x03\x12:\n\x05state\x18\x07 \x01(\x0e\x32&.google.bigtable.admin.v2.Backup.StateB\x03\xe0\x41\x03"7\n\x05State\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\x0c\n\x08\x43REATING\x10\x01\x12\t\n\x05READY\x10\x02:p\xea\x41m\n\x1e\x62igtable.googleapis.com/Backup\x12Kprojects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}"\xa4\x01\n\nBackupInfo\x12\x13\n\x06\x62\x61\x63kup\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x33\n\nstart_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x31\n\x08\x65nd_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x19\n\x0csource_table\x18\x04 
\x01(\tB\x03\xe0\x41\x03*D\n\x11RestoreSourceType\x12#\n\x1fRESTORE_SOURCE_TYPE_UNSPECIFIED\x10\x00\x12\n\n\x06\x42\x41\x43KUP\x10\x01\x42\xd2\x01\n\x1c\x63om.google.bigtable.admin.v2B\nTableProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2\xea\x02"Google::Cloud::Bigtable::Admin::V2b\x06proto3', - dependencies=[ - google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, - google_dot_api_dot_resource__pb2.DESCRIPTOR, - google_dot_protobuf_dot_duration__pb2.DESCRIPTOR, - google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - ], -) - -_RESTORESOURCETYPE = _descriptor.EnumDescriptor( - name="RestoreSourceType", - full_name="google.bigtable.admin.v2.RestoreSourceType", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="RESTORE_SOURCE_TYPE_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="BACKUP", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=2893, - serialized_end=2961, -) -_sym_db.RegisterEnumDescriptor(_RESTORESOURCETYPE) - -RestoreSourceType = enum_type_wrapper.EnumTypeWrapper(_RESTORESOURCETYPE) -RESTORE_SOURCE_TYPE_UNSPECIFIED = 0 -BACKUP = 1 - - -_TABLE_CLUSTERSTATE_REPLICATIONSTATE = _descriptor.EnumDescriptor( - name="ReplicationState", - full_name="google.bigtable.admin.v2.Table.ClusterState.ReplicationState", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="STATE_NOT_KNOWN", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - 
name="INITIALIZING", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="PLANNED_MAINTENANCE", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="UNPLANNED_MAINTENANCE", - index=3, - number=3, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="READY", - index=4, - number=4, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="READY_OPTIMIZING", - index=5, - number=5, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=783, - serialized_end=925, -) -_sym_db.RegisterEnumDescriptor(_TABLE_CLUSTERSTATE_REPLICATIONSTATE) - -_TABLE_TIMESTAMPGRANULARITY = _descriptor.EnumDescriptor( - name="TimestampGranularity", - full_name="google.bigtable.admin.v2.Table.TimestampGranularity", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="TIMESTAMP_GRANULARITY_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="MILLIS", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=1122, - serialized_end=1195, -) -_sym_db.RegisterEnumDescriptor(_TABLE_TIMESTAMPGRANULARITY) - -_TABLE_VIEW = _descriptor.EnumDescriptor( - name="View", - full_name="google.bigtable.admin.v2.Table.View", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - 
values=[ - _descriptor.EnumValueDescriptor( - name="VIEW_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="NAME_ONLY", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="SCHEMA_VIEW", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="REPLICATION_VIEW", - index=3, - number=3, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="FULL", - index=4, - number=4, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=1197, - serialized_end=1289, -) -_sym_db.RegisterEnumDescriptor(_TABLE_VIEW) - -_SNAPSHOT_STATE = _descriptor.EnumDescriptor( - name="State", - full_name="google.bigtable.admin.v2.Snapshot.State", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="STATE_NOT_KNOWN", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="READY", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="CREATING", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=2077, - serialized_end=2130, -) -_sym_db.RegisterEnumDescriptor(_SNAPSHOT_STATE) - -_BACKUP_STATE = _descriptor.EnumDescriptor( - name="State", - 
full_name="google.bigtable.admin.v2.Backup.State", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="STATE_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="CREATING", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="READY", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=2555, - serialized_end=2610, -) -_sym_db.RegisterEnumDescriptor(_BACKUP_STATE) - - -_RESTOREINFO = _descriptor.Descriptor( - name="RestoreInfo", - full_name="google.bigtable.admin.v2.RestoreInfo", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="source_type", - full_name="google.bigtable.admin.v2.RestoreInfo.source_type", - index=0, - number=1, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="backup_info", - full_name="google.bigtable.admin.v2.RestoreInfo.backup_info", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - 
is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="source_info", - full_name="google.bigtable.admin.v2.RestoreInfo.source_info", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=204, - serialized_end=359, -) - - -_TABLE_CLUSTERSTATE = _descriptor.Descriptor( - name="ClusterState", - full_name="google.bigtable.admin.v2.Table.ClusterState", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="replication_state", - full_name="google.bigtable.admin.v2.Table.ClusterState.replication_state", - index=0, - number=1, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[_TABLE_CLUSTERSTATE_REPLICATIONSTATE,], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=676, - serialized_end=925, -) - -_TABLE_CLUSTERSTATESENTRY = _descriptor.Descriptor( - name="ClusterStatesEntry", - full_name="google.bigtable.admin.v2.Table.ClusterStatesEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.bigtable.admin.v2.Table.ClusterStatesEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - 
), - _descriptor.FieldDescriptor( - name="value", - full_name="google.bigtable.admin.v2.Table.ClusterStatesEntry.value", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=927, - serialized_end=1025, -) - -_TABLE_COLUMNFAMILIESENTRY = _descriptor.Descriptor( - name="ColumnFamiliesEntry", - full_name="google.bigtable.admin.v2.Table.ColumnFamiliesEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.bigtable.admin.v2.Table.ColumnFamiliesEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.bigtable.admin.v2.Table.ColumnFamiliesEntry.value", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1027, - 
serialized_end=1120, -) - -_TABLE = _descriptor.Descriptor( - name="Table", - full_name="google.bigtable.admin.v2.Table", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.Table.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cluster_states", - full_name="google.bigtable.admin.v2.Table.cluster_states", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="column_families", - full_name="google.bigtable.admin.v2.Table.column_families", - index=2, - number=3, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="granularity", - full_name="google.bigtable.admin.v2.Table.granularity", - index=3, - number=4, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - 
name="restore_info", - full_name="google.bigtable.admin.v2.Table.restore_info", - index=4, - number=6, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[ - _TABLE_CLUSTERSTATE, - _TABLE_CLUSTERSTATESENTRY, - _TABLE_COLUMNFAMILIESENTRY, - ], - enum_types=[_TABLE_TIMESTAMPGRANULARITY, _TABLE_VIEW,], - serialized_options=b"\352AW\n\035bigtable.googleapis.com/Table\0226projects/{project}/instances/{instance}/tables/{table}", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=362, - serialized_end=1381, -) - - -_COLUMNFAMILY = _descriptor.Descriptor( - name="ColumnFamily", - full_name="google.bigtable.admin.v2.ColumnFamily", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="gc_rule", - full_name="google.bigtable.admin.v2.ColumnFamily.gc_rule", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1383, - serialized_end=1448, -) - - -_GCRULE_INTERSECTION = _descriptor.Descriptor( - name="Intersection", - full_name="google.bigtable.admin.v2.GcRule.Intersection", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - 
name="rules", - full_name="google.bigtable.admin.v2.GcRule.Intersection.rules", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1663, - serialized_end=1726, -) - -_GCRULE_UNION = _descriptor.Descriptor( - name="Union", - full_name="google.bigtable.admin.v2.GcRule.Union", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="rules", - full_name="google.bigtable.admin.v2.GcRule.Union.rules", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1728, - serialized_end=1784, -) - -_GCRULE = _descriptor.Descriptor( - name="GcRule", - full_name="google.bigtable.admin.v2.GcRule", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="max_num_versions", - full_name="google.bigtable.admin.v2.GcRule.max_num_versions", - index=0, - number=1, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - 
is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="max_age", - full_name="google.bigtable.admin.v2.GcRule.max_age", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="intersection", - full_name="google.bigtable.admin.v2.GcRule.intersection", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="union", - full_name="google.bigtable.admin.v2.GcRule.union", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_GCRULE_INTERSECTION, _GCRULE_UNION,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="rule", - full_name="google.bigtable.admin.v2.GcRule.rule", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=1451, - serialized_end=1792, -) - - -_SNAPSHOT = _descriptor.Descriptor( - name="Snapshot", - full_name="google.bigtable.admin.v2.Snapshot", - filename=None, - 
file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.Snapshot.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="source_table", - full_name="google.bigtable.admin.v2.Snapshot.source_table", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="data_size_bytes", - full_name="google.bigtable.admin.v2.Snapshot.data_size_bytes", - index=2, - number=3, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="create_time", - full_name="google.bigtable.admin.v2.Snapshot.create_time", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="delete_time", - full_name="google.bigtable.admin.v2.Snapshot.delete_time", - index=4, - number=5, - type=11, - cpp_type=10, - label=1, - 
has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="state", - full_name="google.bigtable.admin.v2.Snapshot.state", - index=5, - number=6, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="description", - full_name="google.bigtable.admin.v2.Snapshot.description", - index=6, - number=7, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[_SNAPSHOT_STATE,], - serialized_options=b"\352As\n bigtable.googleapis.com/Snapshot\022Oprojects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1795, - serialized_end=2250, -) - - -_BACKUP = _descriptor.Descriptor( - name="Backup", - full_name="google.bigtable.admin.v2.Backup", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.Backup.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - 
extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="source_table", - full_name="google.bigtable.admin.v2.Backup.source_table", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\005\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="expire_time", - full_name="google.bigtable.admin.v2.Backup.expire_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="start_time", - full_name="google.bigtable.admin.v2.Backup.start_time", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="end_time", - full_name="google.bigtable.admin.v2.Backup.end_time", - index=4, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="size_bytes", - full_name="google.bigtable.admin.v2.Backup.size_bytes", - 
index=5, - number=6, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="state", - full_name="google.bigtable.admin.v2.Backup.state", - index=6, - number=7, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[_BACKUP_STATE,], - serialized_options=b"\352Am\n\036bigtable.googleapis.com/Backup\022Kprojects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2253, - serialized_end=2724, -) - - -_BACKUPINFO = _descriptor.Descriptor( - name="BackupInfo", - full_name="google.bigtable.admin.v2.BackupInfo", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="backup", - full_name="google.bigtable.admin.v2.BackupInfo.backup", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="start_time", - full_name="google.bigtable.admin.v2.BackupInfo.start_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - 
message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="end_time", - full_name="google.bigtable.admin.v2.BackupInfo.end_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="source_table", - full_name="google.bigtable.admin.v2.BackupInfo.source_table", - index=3, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2727, - serialized_end=2891, -) - -_RESTOREINFO.fields_by_name["source_type"].enum_type = _RESTORESOURCETYPE -_RESTOREINFO.fields_by_name["backup_info"].message_type = _BACKUPINFO -_RESTOREINFO.oneofs_by_name["source_info"].fields.append( - _RESTOREINFO.fields_by_name["backup_info"] -) -_RESTOREINFO.fields_by_name[ - "backup_info" -].containing_oneof = _RESTOREINFO.oneofs_by_name["source_info"] -_TABLE_CLUSTERSTATE.fields_by_name[ - "replication_state" -].enum_type = _TABLE_CLUSTERSTATE_REPLICATIONSTATE -_TABLE_CLUSTERSTATE.containing_type = _TABLE -_TABLE_CLUSTERSTATE_REPLICATIONSTATE.containing_type = _TABLE_CLUSTERSTATE -_TABLE_CLUSTERSTATESENTRY.fields_by_name["value"].message_type = 
_TABLE_CLUSTERSTATE -_TABLE_CLUSTERSTATESENTRY.containing_type = _TABLE -_TABLE_COLUMNFAMILIESENTRY.fields_by_name["value"].message_type = _COLUMNFAMILY -_TABLE_COLUMNFAMILIESENTRY.containing_type = _TABLE -_TABLE.fields_by_name["cluster_states"].message_type = _TABLE_CLUSTERSTATESENTRY -_TABLE.fields_by_name["column_families"].message_type = _TABLE_COLUMNFAMILIESENTRY -_TABLE.fields_by_name["granularity"].enum_type = _TABLE_TIMESTAMPGRANULARITY -_TABLE.fields_by_name["restore_info"].message_type = _RESTOREINFO -_TABLE_TIMESTAMPGRANULARITY.containing_type = _TABLE -_TABLE_VIEW.containing_type = _TABLE -_COLUMNFAMILY.fields_by_name["gc_rule"].message_type = _GCRULE -_GCRULE_INTERSECTION.fields_by_name["rules"].message_type = _GCRULE -_GCRULE_INTERSECTION.containing_type = _GCRULE -_GCRULE_UNION.fields_by_name["rules"].message_type = _GCRULE -_GCRULE_UNION.containing_type = _GCRULE -_GCRULE.fields_by_name[ - "max_age" -].message_type = google_dot_protobuf_dot_duration__pb2._DURATION -_GCRULE.fields_by_name["intersection"].message_type = _GCRULE_INTERSECTION -_GCRULE.fields_by_name["union"].message_type = _GCRULE_UNION -_GCRULE.oneofs_by_name["rule"].fields.append(_GCRULE.fields_by_name["max_num_versions"]) -_GCRULE.fields_by_name["max_num_versions"].containing_oneof = _GCRULE.oneofs_by_name[ - "rule" -] -_GCRULE.oneofs_by_name["rule"].fields.append(_GCRULE.fields_by_name["max_age"]) -_GCRULE.fields_by_name["max_age"].containing_oneof = _GCRULE.oneofs_by_name["rule"] -_GCRULE.oneofs_by_name["rule"].fields.append(_GCRULE.fields_by_name["intersection"]) -_GCRULE.fields_by_name["intersection"].containing_oneof = _GCRULE.oneofs_by_name["rule"] -_GCRULE.oneofs_by_name["rule"].fields.append(_GCRULE.fields_by_name["union"]) -_GCRULE.fields_by_name["union"].containing_oneof = _GCRULE.oneofs_by_name["rule"] -_SNAPSHOT.fields_by_name["source_table"].message_type = _TABLE -_SNAPSHOT.fields_by_name[ - "create_time" -].message_type = 
google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_SNAPSHOT.fields_by_name[ - "delete_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_SNAPSHOT.fields_by_name["state"].enum_type = _SNAPSHOT_STATE -_SNAPSHOT_STATE.containing_type = _SNAPSHOT -_BACKUP.fields_by_name[ - "expire_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_BACKUP.fields_by_name[ - "start_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_BACKUP.fields_by_name[ - "end_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_BACKUP.fields_by_name["state"].enum_type = _BACKUP_STATE -_BACKUP_STATE.containing_type = _BACKUP -_BACKUPINFO.fields_by_name[ - "start_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_BACKUPINFO.fields_by_name[ - "end_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -DESCRIPTOR.message_types_by_name["RestoreInfo"] = _RESTOREINFO -DESCRIPTOR.message_types_by_name["Table"] = _TABLE -DESCRIPTOR.message_types_by_name["ColumnFamily"] = _COLUMNFAMILY -DESCRIPTOR.message_types_by_name["GcRule"] = _GCRULE -DESCRIPTOR.message_types_by_name["Snapshot"] = _SNAPSHOT -DESCRIPTOR.message_types_by_name["Backup"] = _BACKUP -DESCRIPTOR.message_types_by_name["BackupInfo"] = _BACKUPINFO -DESCRIPTOR.enum_types_by_name["RestoreSourceType"] = _RESTORESOURCETYPE -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -RestoreInfo = _reflection.GeneratedProtocolMessageType( - "RestoreInfo", - (_message.Message,), - { - "DESCRIPTOR": _RESTOREINFO, - "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", - "__doc__": """Information about a table restore. - - Attributes: - source_type: - The type of the restore source. - source_info: - Information about the source used to restore the table. - backup_info: - Information about the backup used to restore the table. The - backup may no longer exist. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.RestoreInfo) - }, -) -_sym_db.RegisterMessage(RestoreInfo) - -Table = _reflection.GeneratedProtocolMessageType( - "Table", - (_message.Message,), - { - "ClusterState": _reflection.GeneratedProtocolMessageType( - "ClusterState", - (_message.Message,), - { - "DESCRIPTOR": _TABLE_CLUSTERSTATE, - "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", - "__doc__": """The state of a table’s data in a particular cluster. - - Attributes: - replication_state: - Output only. The state of replication for the table in this - cluster. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table.ClusterState) - }, - ), - "ClusterStatesEntry": _reflection.GeneratedProtocolMessageType( - "ClusterStatesEntry", - (_message.Message,), - { - "DESCRIPTOR": _TABLE_CLUSTERSTATESENTRY, - "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2" - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table.ClusterStatesEntry) - }, - ), - "ColumnFamiliesEntry": _reflection.GeneratedProtocolMessageType( - "ColumnFamiliesEntry", - (_message.Message,), - { - "DESCRIPTOR": _TABLE_COLUMNFAMILIESENTRY, - "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2" - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table.ColumnFamiliesEntry) - }, - ), - "DESCRIPTOR": _TABLE, - "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", - "__doc__": """A collection of user data indexed by row, column, and timestamp. Each - table is served using the resources of its parent cluster. - - Attributes: - name: - Output only. The unique name of the table. Values are of the - form ``projects//instances//tables/[_a- - zA-Z0-9][-_.a-zA-Z0-9]*``. Views: ``NAME_ONLY``, - ``SCHEMA_VIEW``, ``REPLICATION_VIEW``, ``FULL`` - cluster_states: - Output only. Map from cluster ID to per-cluster table state. 
- If it could not be determined whether or not the table has - data in a particular cluster (for example, if its zone is - unavailable), then there will be an entry for the cluster with - UNKNOWN ``replication_status``. Views: ``REPLICATION_VIEW``, - ``FULL`` - column_families: - (\ ``CreationOnly``) The column families configured for this - table, mapped by column family ID. Views: ``SCHEMA_VIEW``, - ``FULL`` - granularity: - (\ ``CreationOnly``) The granularity (i.e. ``MILLIS``) at - which timestamps are stored in this table. Timestamps not - matching the granularity will be rejected. If unspecified at - creation time, the value will be set to ``MILLIS``. Views: - ``SCHEMA_VIEW``, ``FULL``. - restore_info: - Output only. If this table was restored from another data - source (e.g. a backup), this field will be populated with - information about the restore. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table) - }, -) -_sym_db.RegisterMessage(Table) -_sym_db.RegisterMessage(Table.ClusterState) -_sym_db.RegisterMessage(Table.ClusterStatesEntry) -_sym_db.RegisterMessage(Table.ColumnFamiliesEntry) - -ColumnFamily = _reflection.GeneratedProtocolMessageType( - "ColumnFamily", - (_message.Message,), - { - "DESCRIPTOR": _COLUMNFAMILY, - "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", - "__doc__": """A set of columns within a table which share a common configuration. - - Attributes: - gc_rule: - Garbage collection rule specified as a protobuf. Must - serialize to at most 500 bytes. NOTE: Garbage collection - executes opportunistically in the background, and so it’s - possible for reads to return a cell even if it matches the - active GC expression for its family. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ColumnFamily) - }, -) -_sym_db.RegisterMessage(ColumnFamily) - -GcRule = _reflection.GeneratedProtocolMessageType( - "GcRule", - (_message.Message,), - { - "Intersection": _reflection.GeneratedProtocolMessageType( - "Intersection", - (_message.Message,), - { - "DESCRIPTOR": _GCRULE_INTERSECTION, - "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", - "__doc__": """A GcRule which deletes cells matching all of the given rules. - - Attributes: - rules: - Only delete cells which would be deleted by every element of - ``rules``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GcRule.Intersection) - }, - ), - "Union": _reflection.GeneratedProtocolMessageType( - "Union", - (_message.Message,), - { - "DESCRIPTOR": _GCRULE_UNION, - "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", - "__doc__": """A GcRule which deletes cells matching any of the given rules. - - Attributes: - rules: - Delete cells which would be deleted by any element of - ``rules``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GcRule.Union) - }, - ), - "DESCRIPTOR": _GCRULE, - "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", - "__doc__": """Rule for determining which cells to delete during garbage collection. - - Attributes: - rule: - Garbage collection rules. - max_num_versions: - Delete all cells in a column except the most recent N. - max_age: - Delete cells in a column older than the given age. Values must - be at least one millisecond, and will be truncated to - microsecond granularity. - intersection: - Delete cells that would be deleted by every nested rule. - union: - Delete cells that would be deleted by any nested rule. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GcRule) - }, -) -_sym_db.RegisterMessage(GcRule) -_sym_db.RegisterMessage(GcRule.Intersection) -_sym_db.RegisterMessage(GcRule.Union) - -Snapshot = _reflection.GeneratedProtocolMessageType( - "Snapshot", - (_message.Message,), - { - "DESCRIPTOR": _SNAPSHOT, - "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", - "__doc__": """A snapshot of a table at a particular time. A snapshot can be used as - a checkpoint for data restoration or a data source for a new table. - Note: This is a private alpha release of Cloud Bigtable snapshots. - This feature is not currently available to most Cloud Bigtable - customers. This feature might be changed in backward-incompatible ways - and is not recommended for production use. It is not subject to any - SLA or deprecation policy. - - Attributes: - name: - Output only. The unique name of the snapshot. Values are of - the form ``projects//instances//clusters//snapshots/``. - source_table: - Output only. The source table at the time the snapshot was - taken. - data_size_bytes: - Output only. The size of the data in the source table at the - time the snapshot was taken. In some cases, this value may be - computed asynchronously via a background process and a - placeholder of 0 will be used in the meantime. - create_time: - Output only. The time when the snapshot is created. - delete_time: - Output only. The time when the snapshot will be deleted. The - maximum amount of time a snapshot can stay active is 365 days. - If ‘ttl’ is not specified, the default maximum of 365 days - will be used. - state: - Output only. The current state of the snapshot. - description: - Output only. Description of the snapshot. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Snapshot) - }, -) -_sym_db.RegisterMessage(Snapshot) - -Backup = _reflection.GeneratedProtocolMessageType( - "Backup", - (_message.Message,), - { - "DESCRIPTOR": _BACKUP, - "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", - "__doc__": """A backup of a Cloud Bigtable table. - - Attributes: - name: - Output only. A globally unique identifier for the backup which - cannot be changed. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/ - backups/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` The final segment of the - name must be between 1 and 50 characters in length. The - backup is stored in the cluster identified by the prefix of - the backup name of the form ``projects/{project}/instances/{in - stance}/clusters/{cluster}``. - source_table: - Required. Immutable. Name of the table from which this backup - was created. This needs to be in the same instance as the - backup. Values are of the form ``projects/{project}/instances/ - {instance}/tables/{source_table}``. - expire_time: - Required. The expiration time of the backup, with microseconds - granularity that must be at least 6 hours and at most 30 days - from the time the request is received. Once the - ``expire_time`` has passed, Cloud Bigtable will delete the - backup and free the resources used by the backup. - start_time: - Output only. ``start_time`` is the time that the backup was - started (i.e. approximately the time the [CreateBackup][google - .bigtable.admin.v2.BigtableTableAdmin.CreateBackup] request is - received). The row data in this backup will be no older than - this timestamp. - end_time: - Output only. ``end_time`` is the time that the backup was - finished. The row data in the backup will be no newer than - this timestamp. - size_bytes: - Output only. Size of the backup in bytes. - state: - Output only. The current state of the backup. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Backup) - }, -) -_sym_db.RegisterMessage(Backup) - -BackupInfo = _reflection.GeneratedProtocolMessageType( - "BackupInfo", - (_message.Message,), - { - "DESCRIPTOR": _BACKUPINFO, - "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", - "__doc__": """Information about a backup. - - Attributes: - backup: - Output only. Name of the backup. - start_time: - Output only. The time that the backup was started. Row data in - the backup will be no older than this timestamp. - end_time: - Output only. This time that the backup was finished. Row data - in the backup will be no newer than this timestamp. - source_table: - Output only. Name of the table the backup was created from. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.BackupInfo) - }, -) -_sym_db.RegisterMessage(BackupInfo) - - -DESCRIPTOR._options = None -_TABLE_CLUSTERSTATESENTRY._options = None -_TABLE_COLUMNFAMILIESENTRY._options = None -_TABLE._options = None -_SNAPSHOT._options = None -_BACKUP.fields_by_name["name"]._options = None -_BACKUP.fields_by_name["source_table"]._options = None -_BACKUP.fields_by_name["expire_time"]._options = None -_BACKUP.fields_by_name["start_time"]._options = None -_BACKUP.fields_by_name["end_time"]._options = None -_BACKUP.fields_by_name["size_bytes"]._options = None -_BACKUP.fields_by_name["state"]._options = None -_BACKUP._options = None -_BACKUPINFO.fields_by_name["backup"]._options = None -_BACKUPINFO.fields_by_name["start_time"]._options = None -_BACKUPINFO.fields_by_name["end_time"]._options = None -_BACKUPINFO.fields_by_name["source_table"]._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/bigtable_admin_v2/proto/table_pb2_grpc.py b/google/cloud/bigtable_admin_v2/proto/table_pb2_grpc.py deleted file mode 100644 index 8a9393943..000000000 --- a/google/cloud/bigtable_admin_v2/proto/table_pb2_grpc.py +++ /dev/null @@ -1,3 +0,0 @@ -# 
Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index 8e6f504da..03c16fe92 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -484,9 +484,8 @@ def create_instance( request.instance_id = instance_id if instance is not None: request.instance = instance - - if clusters: - request.clusters.update(clusters) + if clusters is not None: + request.clusters = clusters # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. @@ -1832,6 +1831,9 @@ def get_iam_policy( elif not request: request = iam_policy.GetIamPolicyRequest(resource=resource,) + if resource is not None: + request.resource = resource + # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.get_iam_policy] @@ -1957,6 +1959,9 @@ def set_iam_policy( elif not request: request = iam_policy.SetIamPolicyRequest(resource=resource,) + if resource is not None: + request.resource = resource + # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.set_iam_policy] @@ -2039,6 +2044,12 @@ def test_iam_permissions( resource=resource, permissions=permissions, ) + if resource is not None: + request.resource = resource + + if permissions: + request.permissions.extend(permissions) + # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. 
rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions] diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py index f70936b5b..f92d47886 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py @@ -15,7 +15,16 @@ # limitations under the License. # -from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple +from typing import ( + Any, + AsyncIterable, + Awaitable, + Callable, + Iterable, + Sequence, + Tuple, + Optional, +) from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin from google.cloud.bigtable_admin_v2.types import instance diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index 58eb4a9cd..7750340bc 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -952,9 +952,8 @@ def modify_column_families( if name is not None: request.name = name - - if modifications: - request.modifications.extend(modifications) + if modifications is not None: + request.modifications = modifications # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. @@ -2236,6 +2235,9 @@ def get_iam_policy( elif not request: request = iam_policy.GetIamPolicyRequest(resource=resource,) + if resource is not None: + request.resource = resource + # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. 
rpc = self._transport._wrapped_methods[self._transport.get_iam_policy] @@ -2361,6 +2363,9 @@ def set_iam_policy( elif not request: request = iam_policy.SetIamPolicyRequest(resource=resource,) + if resource is not None: + request.resource = resource + # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.set_iam_policy] @@ -2443,6 +2448,12 @@ def test_iam_permissions( resource=resource, permissions=permissions, ) + if resource is not None: + request.resource = resource + + if permissions: + request.permissions.extend(permissions) + # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions] diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py index be7c121d7..203d94f83 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py @@ -15,7 +15,16 @@ # limitations under the License. # -from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple +from typing import ( + Any, + AsyncIterable, + Awaitable, + Callable, + Iterable, + Sequence, + Tuple, + Optional, +) from google.cloud.bigtable_admin_v2.types import bigtable_table_admin from google.cloud.bigtable_admin_v2.types import table diff --git a/google/cloud/bigtable_admin_v2/types.py b/google/cloud/bigtable_admin_v2/types.py deleted file mode 100644 index 7dbb939d1..000000000 --- a/google/cloud/bigtable_admin_v2/types.py +++ /dev/null @@ -1,76 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from __future__ import absolute_import -import sys - -from google.api_core.protobuf_helpers import get_messages - -from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2 -from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2 -from google.cloud.bigtable_admin_v2.proto import common_pb2 -from google.cloud.bigtable_admin_v2.proto import instance_pb2 -from google.cloud.bigtable_admin_v2.proto import table_pb2 -from google.iam.v1 import iam_policy_pb2 -from google.iam.v1 import options_pb2 -from google.iam.v1 import policy_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import any_pb2 -from google.protobuf import duration_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 -from google.protobuf import timestamp_pb2 -from google.rpc import status_pb2 -from google.type import expr_pb2 - - -_shared_modules = [ - iam_policy_pb2, - options_pb2, - policy_pb2, - operations_pb2, - any_pb2, - duration_pb2, - empty_pb2, - field_mask_pb2, - timestamp_pb2, - status_pb2, - expr_pb2, -] - -_local_modules = [ - bigtable_instance_admin_pb2, - bigtable_table_admin_pb2, - common_pb2, - instance_pb2, - table_pb2, -] - -names = [] - -for module in _shared_modules: # pragma: NO COVER - for name, message in get_messages(module).items(): - setattr(sys.modules[__name__], name, message) - names.append(name) -for module in _local_modules: - for name, message in get_messages(module).items(): - message.__module__ = "google.cloud.bigtable_admin_v2.types" - 
setattr(sys.modules[__name__], name, message) - names.append(name) - - -__all__ = tuple(sorted(names)) diff --git a/google/cloud/bigtable_v2/__init__.py b/google/cloud/bigtable_v2/__init__.py index 8c31017cc..0ab15791b 100644 --- a/google/cloud/bigtable_v2/__init__.py +++ b/google/cloud/bigtable_v2/__init__.py @@ -1,42 +1,71 @@ # -*- coding: utf-8 -*- -# + # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# https://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +# - -from __future__ import absolute_import -import sys -import warnings - -from google.cloud.bigtable_v2 import types -from google.cloud.bigtable_v2.gapic import bigtable_client - - -if sys.version_info[:2] == (2, 7): - message = ( - "A future version of this library will drop support for Python 2.7. 
" - "More details about Python 2 support for Google Cloud Client Libraries " - "can be found at https://cloud.google.com/python/docs/python2-sunset/" - ) - warnings.warn(message, DeprecationWarning) - - -class BigtableClient(bigtable_client.BigtableClient): - __doc__ = bigtable_client.BigtableClient.__doc__ +from .services.bigtable import BigtableClient +from .types.bigtable import CheckAndMutateRowRequest +from .types.bigtable import CheckAndMutateRowResponse +from .types.bigtable import MutateRowRequest +from .types.bigtable import MutateRowResponse +from .types.bigtable import MutateRowsRequest +from .types.bigtable import MutateRowsResponse +from .types.bigtable import ReadModifyWriteRowRequest +from .types.bigtable import ReadModifyWriteRowResponse +from .types.bigtable import ReadRowsRequest +from .types.bigtable import ReadRowsResponse +from .types.bigtable import SampleRowKeysRequest +from .types.bigtable import SampleRowKeysResponse +from .types.data import Cell +from .types.data import Column +from .types.data import ColumnRange +from .types.data import Family +from .types.data import Mutation +from .types.data import ReadModifyWriteRule +from .types.data import Row +from .types.data import RowFilter +from .types.data import RowRange +from .types.data import RowSet +from .types.data import TimestampRange +from .types.data import ValueRange __all__ = ( - "types", + "Cell", + "CheckAndMutateRowRequest", + "CheckAndMutateRowResponse", + "Column", + "ColumnRange", + "Family", + "MutateRowRequest", + "MutateRowResponse", + "MutateRowsRequest", + "MutateRowsResponse", + "Mutation", + "ReadModifyWriteRowRequest", + "ReadModifyWriteRowResponse", + "ReadModifyWriteRule", + "ReadRowsRequest", + "ReadRowsResponse", + "Row", + "RowFilter", + "RowRange", + "RowSet", + "SampleRowKeysRequest", + "SampleRowKeysResponse", + "TimestampRange", + "ValueRange", "BigtableClient", ) diff --git a/google/cloud/bigtable_v2/gapic/__init__.py 
b/google/cloud/bigtable_v2/gapic/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/google/cloud/bigtable_v2/gapic/bigtable_client.py b/google/cloud/bigtable_v2/gapic/bigtable_client.py deleted file mode 100644 index f02e0048f..000000000 --- a/google/cloud/bigtable_v2/gapic/bigtable_client.py +++ /dev/null @@ -1,771 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Accesses the google.bigtable.v2 Bigtable API.""" - -import pkg_resources -import warnings - -from google.oauth2 import service_account -import google.api_core.client_options -import google.api_core.gapic_v1.client_info -import google.api_core.gapic_v1.config -import google.api_core.gapic_v1.method -import google.api_core.gapic_v1.routing_header -import google.api_core.grpc_helpers -import google.api_core.path_template -import grpc - -from google.cloud.bigtable_v2.gapic import bigtable_client_config -from google.cloud.bigtable_v2.gapic.transports import bigtable_grpc_transport -from google.cloud.bigtable_v2.proto import bigtable_pb2 -from google.cloud.bigtable_v2.proto import bigtable_pb2_grpc -from google.cloud.bigtable_v2.proto import data_pb2 - - -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( - "google-cloud-bigtable", -).version - - -class BigtableClient(object): - """Service for reading from and writing to existing Bigtable tables.""" - - SERVICE_ADDRESS = "bigtable.googleapis.com:443" - """The 
default address of the service.""" - - # The name of the interface for this client. This is the key used to - # find the method configuration in the client_config dictionary. - _INTERFACE_NAME = "google.bigtable.v2.Bigtable" - - @classmethod - def from_service_account_file(cls, filename, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - BigtableClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file(filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @classmethod - def table_path(cls, project, instance, table): - """Return a fully-qualified table string.""" - return google.api_core.path_template.expand( - "projects/{project}/instances/{instance}/tables/{table}", - project=project, - instance=instance, - table=table, - ) - - def __init__( - self, - transport=None, - channel=None, - credentials=None, - client_config=None, - client_info=None, - client_options=None, - ): - """Constructor. - - Args: - transport (Union[~.BigtableGrpcTransport, - Callable[[~.Credentials, type], ~.BigtableGrpcTransport]): A transport - instance, responsible for actually making the API calls. - The default transport uses the gRPC protocol. - This argument may also be a callable which returns a - transport instance. Callables will be sent the credentials - as the first argument and the default transport class as - the second argument. - channel (grpc.Channel): DEPRECATED. A ``Channel`` instance - through which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. 
- credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is mutually exclusive with providing a - transport instance to ``transport``; doing so will raise - an exception. - client_config (dict): DEPRECATED. A dictionary of call options for - each method. If not specified, the default configuration is used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - client_options (Union[dict, google.api_core.client_options.ClientOptions]): - Client options used to set user options on the client. API Endpoint - should be set through client_options. - """ - # Raise deprecation warnings for things we want to go away. - if client_config is not None: - warnings.warn( - "The `client_config` argument is deprecated.", - PendingDeprecationWarning, - stacklevel=2, - ) - else: - client_config = bigtable_client_config.config - - if channel: - warnings.warn( - "The `channel` argument is deprecated; use " "`transport` instead.", - PendingDeprecationWarning, - stacklevel=2, - ) - - api_endpoint = self.SERVICE_ADDRESS - if client_options: - if type(client_options) == dict: - client_options = google.api_core.client_options.from_dict( - client_options - ) - if client_options.api_endpoint: - api_endpoint = client_options.api_endpoint - - # Instantiate the transport. - # The transport is responsible for handling serialization and - # deserialization and actually sending data to the service. 
- if transport: - if callable(transport): - self.transport = transport( - credentials=credentials, - default_class=bigtable_grpc_transport.BigtableGrpcTransport, - address=api_endpoint, - ) - else: - if credentials: - raise ValueError( - "Received both a transport instance and " - "credentials; these are mutually exclusive." - ) - self.transport = transport - else: - self.transport = bigtable_grpc_transport.BigtableGrpcTransport( - address=api_endpoint, channel=channel, credentials=credentials, - ) - - if client_info is None: - client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION, - ) - else: - client_info.gapic_version = _GAPIC_LIBRARY_VERSION - self._client_info = client_info - - # Parse out the default settings for retry and timeout for each RPC - # from the client configuration. - # (Ordinarily, these are the defaults specified in the `*_config.py` - # file next to this one.) - self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME], - ) - - # Save a dictionary of cached API call functions. - # These are the actual callables which invoke the proper - # transport methods, wrapped with `wrap_method` to add retry, - # timeout, and the like. - self._inner_api_calls = {} - - # Service calls - def read_rows( - self, - table_name, - app_profile_id=None, - rows=None, - filter_=None, - rows_limit=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Streams back the contents of all requested rows in key order, optionally - applying the same Reader filter to each. Depending on their size, - rows and cells may be broken up across multiple responses, but - atomicity of each row will still be preserved. See the - ReadRowsResponse documentation for details. 
- - Example: - >>> from google.cloud import bigtable_v2 - >>> - >>> client = bigtable_v2.BigtableClient() - >>> - >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> for element in client.read_rows(table_name): - ... # process element - ... pass - - Args: - table_name (str): Required. The unique name of the table from which to read. Values - are of the form - ``projects//instances//tables/
``. - app_profile_id (str): This value specifies routing for replication. If not specified, the - "default" application profile will be used. - rows (Union[dict, ~google.cloud.bigtable_v2.types.RowSet]): The row keys and/or ranges to read. If not specified, reads from all rows. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_v2.types.RowSet` - filter_ (Union[dict, ~google.cloud.bigtable_v2.types.RowFilter]): The filter to apply to the contents of the specified row(s). If unset, - reads the entirety of each row. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_v2.types.RowFilter` - rows_limit (long): The read will terminate after committing to N rows' worth of results. The - default (zero) is to return all results. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - Iterable[~google.cloud.bigtable_v2.types.ReadRowsResponse]. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "read_rows" not in self._inner_api_calls: - self._inner_api_calls[ - "read_rows" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.read_rows, - default_retry=self._method_configs["ReadRows"].retry, - default_timeout=self._method_configs["ReadRows"].timeout, - client_info=self._client_info, - ) - - request = bigtable_pb2.ReadRowsRequest( - table_name=table_name, - app_profile_id=app_profile_id, - rows=rows, - filter=filter_, - rows_limit=rows_limit, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("table_name", table_name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["read_rows"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def sample_row_keys( - self, - table_name, - app_profile_id=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Returns a sample of row keys in the table. The returned row keys will - delimit contiguous sections of the table of approximately equal size, - which can be used to break up the data for distributed tasks like - mapreduces. - - Example: - >>> from google.cloud import bigtable_v2 - >>> - >>> client = bigtable_v2.BigtableClient() - >>> - >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> for element in client.sample_row_keys(table_name): - ... # process element - ... pass - - Args: - table_name (str): Required. The unique name of the table from which to sample row - keys. Values are of the form - ``projects//instances//tables/
``. - app_profile_id (str): This value specifies routing for replication. If not specified, the - "default" application profile will be used. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - Iterable[~google.cloud.bigtable_v2.types.SampleRowKeysResponse]. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "sample_row_keys" not in self._inner_api_calls: - self._inner_api_calls[ - "sample_row_keys" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.sample_row_keys, - default_retry=self._method_configs["SampleRowKeys"].retry, - default_timeout=self._method_configs["SampleRowKeys"].timeout, - client_info=self._client_info, - ) - - request = bigtable_pb2.SampleRowKeysRequest( - table_name=table_name, app_profile_id=app_profile_id, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("table_name", table_name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["sample_row_keys"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def mutate_row( - self, - table_name, - row_key, - mutations, - app_profile_id=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Mutates a row atomically. Cells already present in the row are left - unchanged unless explicitly changed by ``mutation``. - - Example: - >>> from google.cloud import bigtable_v2 - >>> - >>> client = bigtable_v2.BigtableClient() - >>> - >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> # TODO: Initialize `row_key`: - >>> row_key = b'' - >>> - >>> # TODO: Initialize `mutations`: - >>> mutations = [] - >>> - >>> response = client.mutate_row(table_name, row_key, mutations) - - Args: - table_name (str): Required. The unique name of the table to which the mutation should - be applied. Values are of the form - ``projects//instances//tables/
``. - row_key (bytes): Required. The key of the row to which the mutation should be applied. - mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Required. Changes to be atomically applied to the specified row. Entries are applied - in order, meaning that earlier mutations can be masked by later ones. - Must contain at least one entry and at most 100000. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_v2.types.Mutation` - app_profile_id (str): This value specifies routing for replication. If not specified, the - "default" application profile will be used. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_v2.types.MutateRowResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "mutate_row" not in self._inner_api_calls: - self._inner_api_calls[ - "mutate_row" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.mutate_row, - default_retry=self._method_configs["MutateRow"].retry, - default_timeout=self._method_configs["MutateRow"].timeout, - client_info=self._client_info, - ) - - request = bigtable_pb2.MutateRowRequest( - table_name=table_name, - row_key=row_key, - mutations=mutations, - app_profile_id=app_profile_id, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("table_name", table_name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["mutate_row"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def mutate_rows( - self, - table_name, - entries, - app_profile_id=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Mutates multiple rows in a batch. Each individual row is mutated - atomically as in MutateRow, but the entire batch is not executed - atomically. - - Example: - >>> from google.cloud import bigtable_v2 - >>> - >>> client = bigtable_v2.BigtableClient() - >>> - >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> # TODO: Initialize `entries`: - >>> entries = [] - >>> - >>> for element in client.mutate_rows(table_name, entries): - ... # process element - ... pass - - Args: - table_name (str): Required. The unique name of the table to which the mutations should be applied. - entries (list[Union[dict, ~google.cloud.bigtable_v2.types.Entry]]): Required. The row keys and corresponding mutations to be applied in bulk. - Each entry is applied as an atomic mutation, but the entries may be - applied in arbitrary order (even between entries for the same row). 
- At least one entry must be specified, and in total the entries can - contain at most 100000 mutations. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_v2.types.Entry` - app_profile_id (str): This value specifies routing for replication. If not specified, the - "default" application profile will be used. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - Iterable[~google.cloud.bigtable_v2.types.MutateRowsResponse]. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "mutate_rows" not in self._inner_api_calls: - self._inner_api_calls[ - "mutate_rows" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.mutate_rows, - default_retry=self._method_configs["MutateRows"].retry, - default_timeout=self._method_configs["MutateRows"].timeout, - client_info=self._client_info, - ) - - request = bigtable_pb2.MutateRowsRequest( - table_name=table_name, entries=entries, app_profile_id=app_profile_id, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("table_name", table_name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["mutate_rows"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def check_and_mutate_row( - self, - table_name, - row_key, - app_profile_id=None, - predicate_filter=None, - true_mutations=None, - false_mutations=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Mutates a row atomically based on the output of a predicate Reader filter. - - Example: - >>> from google.cloud import bigtable_v2 - >>> - >>> client = bigtable_v2.BigtableClient() - >>> - >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> # TODO: Initialize `row_key`: - >>> row_key = b'' - >>> - >>> response = client.check_and_mutate_row(table_name, row_key) - - Args: - table_name (str): Required. The unique name of the table to which the conditional - mutation should be applied. Values are of the form - ``projects//instances//tables/
``. - row_key (bytes): Required. The key of the row to which the conditional mutation should be applied. - app_profile_id (str): This value specifies routing for replication. If not specified, the - "default" application profile will be used. - predicate_filter (Union[dict, ~google.cloud.bigtable_v2.types.RowFilter]): The filter to be applied to the contents of the specified row. - Depending on whether or not any results are yielded, either - ``true_mutations`` or ``false_mutations`` will be executed. If unset, - checks that the row contains any values at all. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_v2.types.RowFilter` - true_mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row if - ``predicate_filter`` yields at least one cell when applied to - ``row_key``. Entries are applied in order, meaning that earlier - mutations can be masked by later ones. Must contain at least one entry - if ``false_mutations`` is empty, and at most 100000. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_v2.types.Mutation` - false_mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row if - ``predicate_filter`` does not yield any cells when applied to - ``row_key``. Entries are applied in order, meaning that earlier - mutations can be masked by later ones. Must contain at least one entry - if ``true_mutations`` is empty, and at most 100000. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_v2.types.Mutation` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. 
- timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_v2.types.CheckAndMutateRowResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "check_and_mutate_row" not in self._inner_api_calls: - self._inner_api_calls[ - "check_and_mutate_row" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.check_and_mutate_row, - default_retry=self._method_configs["CheckAndMutateRow"].retry, - default_timeout=self._method_configs["CheckAndMutateRow"].timeout, - client_info=self._client_info, - ) - - request = bigtable_pb2.CheckAndMutateRowRequest( - table_name=table_name, - row_key=row_key, - app_profile_id=app_profile_id, - predicate_filter=predicate_filter, - true_mutations=true_mutations, - false_mutations=false_mutations, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("table_name", table_name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["check_and_mutate_row"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def read_modify_write_row( - self, - table_name, - row_key, - rules, - app_profile_id=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Modifies a row atomically on 
the server. The method reads the latest - existing timestamp and value from the specified columns and writes a new - entry based on pre-defined read/modify/write rules. The new value for the - timestamp is the greater of the existing timestamp or the current server - time. The method returns the new contents of all modified cells. - - Example: - >>> from google.cloud import bigtable_v2 - >>> - >>> client = bigtable_v2.BigtableClient() - >>> - >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> # TODO: Initialize `row_key`: - >>> row_key = b'' - >>> - >>> # TODO: Initialize `rules`: - >>> rules = [] - >>> - >>> response = client.read_modify_write_row(table_name, row_key, rules) - - Args: - table_name (str): Required. The unique name of the table to which the - read/modify/write rules should be applied. Values are of the form - ``projects//instances//tables/
``. - row_key (bytes): Required. The key of the row to which the read/modify/write rules should be applied. - rules (list[Union[dict, ~google.cloud.bigtable_v2.types.ReadModifyWriteRule]]): Required. Rules specifying how the specified row's contents are to be transformed - into writes. Entries are applied in order, meaning that earlier rules will - affect the results of later ones. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_v2.types.ReadModifyWriteRule` - app_profile_id (str): This value specifies routing for replication. If not specified, the - "default" application profile will be used. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_v2.types.ReadModifyWriteRowResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "read_modify_write_row" not in self._inner_api_calls: - self._inner_api_calls[ - "read_modify_write_row" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.read_modify_write_row, - default_retry=self._method_configs["ReadModifyWriteRow"].retry, - default_timeout=self._method_configs["ReadModifyWriteRow"].timeout, - client_info=self._client_info, - ) - - request = bigtable_pb2.ReadModifyWriteRowRequest( - table_name=table_name, - row_key=row_key, - rules=rules, - app_profile_id=app_profile_id, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("table_name", table_name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["read_modify_write_row"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) diff --git a/google/cloud/bigtable_v2/gapic/bigtable_client_config.py b/google/cloud/bigtable_v2/gapic/bigtable_client_config.py deleted file mode 100644 index 965e6b90d..000000000 --- a/google/cloud/bigtable_v2/gapic/bigtable_client_config.py +++ /dev/null @@ -1,53 +0,0 @@ -config = { - "interfaces": { - "google.bigtable.v2.Bigtable": { - "retry_codes": { - "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], - "non_idempotent": [], - }, - "retry_params": { - "default": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 20000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 20000, - "total_timeout_millis": 600000, - } - }, - "methods": { - "ReadRows": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "SampleRowKeys": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "MutateRow": { - "timeout_millis": 60000, - 
"retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "MutateRows": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "CheckAndMutateRow": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "ReadModifyWriteRow": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - }, - } - } -} diff --git a/google/cloud/bigtable_v2/gapic/transports/__init__.py b/google/cloud/bigtable_v2/gapic/transports/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py b/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py deleted file mode 100644 index 5b2757db2..000000000 --- a/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py +++ /dev/null @@ -1,207 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import google.api_core.grpc_helpers - -from google.cloud.bigtable_v2.proto import bigtable_pb2_grpc - - -class BigtableGrpcTransport(object): - """gRPC transport class providing stubs for - google.bigtable.v2 Bigtable API. - - The transport provides access to the raw gRPC stubs, - which can be used to take advantage of advanced - features of gRPC. 
- """ - - # The scopes needed to make gRPC calls to all of the methods defined - # in this service. - _OAUTH_SCOPES = ( - "https://www.googleapis.com/auth/bigtable.data", - "https://www.googleapis.com/auth/bigtable.data.readonly", - "https://www.googleapis.com/auth/cloud-bigtable.data", - "https://www.googleapis.com/auth/cloud-bigtable.data.readonly", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - ) - - def __init__( - self, channel=None, credentials=None, address="bigtable.googleapis.com:443" - ): - """Instantiate the transport class. - - Args: - channel (grpc.Channel): A ``Channel`` instance through - which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - address (str): The address where the service is hosted. - """ - # If both `channel` and `credentials` are specified, raise an - # exception (channels come with credentials baked in already). - if channel is not None and credentials is not None: - raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive.", - ) - - # Create the channel. - if channel is None: - channel = self.create_channel( - address=address, - credentials=credentials, - options={ - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - }.items(), - ) - - self._channel = channel - - # gRPC uses objects called "stubs" that are bound to the - # channel and provide a basic method for each RPC. 
- self._stubs = { - "bigtable_stub": bigtable_pb2_grpc.BigtableStub(channel), - } - - @classmethod - def create_channel( - cls, address="bigtable.googleapis.com:443", credentials=None, **kwargs - ): - """Create and return a gRPC channel object. - - Args: - address (str): The host for the channel to use. - credentials (~.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - kwargs (dict): Keyword arguments, which are passed to the - channel creation. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return google.api_core.grpc_helpers.create_channel( - address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs - ) - - @property - def channel(self): - """The gRPC channel used by the transport. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return self._channel - - @property - def read_rows(self): - """Return the gRPC stub for :meth:`BigtableClient.read_rows`. - - Streams back the contents of all requested rows in key order, optionally - applying the same Reader filter to each. Depending on their size, - rows and cells may be broken up across multiple responses, but - atomicity of each row will still be preserved. See the - ReadRowsResponse documentation for details. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_stub"].ReadRows - - @property - def sample_row_keys(self): - """Return the gRPC stub for :meth:`BigtableClient.sample_row_keys`. - - Returns a sample of row keys in the table. The returned row keys will - delimit contiguous sections of the table of approximately equal size, - which can be used to break up the data for distributed tasks like - mapreduces. 
- - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_stub"].SampleRowKeys - - @property - def mutate_row(self): - """Return the gRPC stub for :meth:`BigtableClient.mutate_row`. - - Mutates a row atomically. Cells already present in the row are left - unchanged unless explicitly changed by ``mutation``. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_stub"].MutateRow - - @property - def mutate_rows(self): - """Return the gRPC stub for :meth:`BigtableClient.mutate_rows`. - - Mutates multiple rows in a batch. Each individual row is mutated - atomically as in MutateRow, but the entire batch is not executed - atomically. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_stub"].MutateRows - - @property - def check_and_mutate_row(self): - """Return the gRPC stub for :meth:`BigtableClient.check_and_mutate_row`. - - Mutates a row atomically based on the output of a predicate Reader filter. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_stub"].CheckAndMutateRow - - @property - def read_modify_write_row(self): - """Return the gRPC stub for :meth:`BigtableClient.read_modify_write_row`. - - Modifies a row atomically on the server. The method reads the latest - existing timestamp and value from the specified columns and writes a new - entry based on pre-defined read/modify/write rules. The new value for the - timestamp is the greater of the existing timestamp or the current server - time. The method returns the new contents of all modified cells. 
- - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_stub"].ReadModifyWriteRow diff --git a/google/cloud/bigtable_v2/proto/__init__.py b/google/cloud/bigtable_v2/proto/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/google/cloud/bigtable_v2/proto/bigtable_pb2.py b/google/cloud/bigtable_v2/proto/bigtable_pb2.py deleted file mode 100644 index ba711b20c..000000000 --- a/google/cloud/bigtable_v2/proto/bigtable_pb2.py +++ /dev/null @@ -1,1798 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/bigtable_v2/proto/bigtable.proto -"""Generated protocol buffer code.""" -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.api import client_pb2 as google_dot_api_dot_client__pb2 -from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.cloud.bigtable_v2.proto import ( - data_pb2 as google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2, -) -from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2 -from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/bigtable_v2/proto/bigtable.proto", - package="google.bigtable.v2", - syntax="proto3", - 
serialized_options=b"\n\026com.google.bigtable.v2B\rBigtableProtoP\001Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2\352\002\033Google::Cloud::Bigtable::V2\352AW\n\035bigtable.googleapis.com/Table\0226projects/{project}/instances/{instance}/tables/{table}", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n-google/cloud/bigtable_v2/proto/bigtable.proto\x12\x12google.bigtable.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a)google/cloud/bigtable_v2/proto/data.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x17google/rpc/status.proto"\xd1\x01\n\x0fReadRowsRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x05 \x01(\t\x12(\n\x04rows\x18\x02 \x01(\x0b\x32\x1a.google.bigtable.v2.RowSet\x12-\n\x06\x66ilter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x12\n\nrows_limit\x18\x04 \x01(\x03"\xf8\x02\n\x10ReadRowsResponse\x12>\n\x06\x63hunks\x18\x01 \x03(\x0b\x32..google.bigtable.v2.ReadRowsResponse.CellChunk\x12\x1c\n\x14last_scanned_row_key\x18\x02 \x01(\x0c\x1a\x85\x02\n\tCellChunk\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x31\n\x0b\x66\x61mily_name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\tqualifier\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.BytesValue\x12\x18\n\x10timestamp_micros\x18\x04 \x01(\x03\x12\x0e\n\x06labels\x18\x05 \x03(\t\x12\r\n\x05value\x18\x06 \x01(\x0c\x12\x12\n\nvalue_size\x18\x07 \x01(\x05\x12\x13\n\treset_row\x18\x08 \x01(\x08H\x00\x12\x14\n\ncommit_row\x18\t \x01(\x08H\x00\x42\x0c\n\nrow_status"i\n\x14SampleRowKeysRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t">\n\x15SampleRowKeysResponse\x12\x0f\n\x07row_key\x18\x01 
\x01(\x0c\x12\x14\n\x0coffset_bytes\x18\x02 \x01(\x03"\xb1\x01\n\x10MutateRowRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12\x34\n\tmutations\x18\x03 \x03(\x0b\x32\x1c.google.bigtable.v2.MutationB\x03\xe0\x41\x02"\x13\n\x11MutateRowResponse"\xf9\x01\n\x11MutateRowsRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x03 \x01(\t\x12\x41\n\x07\x65ntries\x18\x02 \x03(\x0b\x32+.google.bigtable.v2.MutateRowsRequest.EntryB\x03\xe0\x41\x02\x1aN\n\x05\x45ntry\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x34\n\tmutations\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.MutationB\x03\xe0\x41\x02"\x8f\x01\n\x12MutateRowsResponse\x12=\n\x07\x65ntries\x18\x01 \x03(\x0b\x32,.google.bigtable.v2.MutateRowsResponse.Entry\x1a:\n\x05\x45ntry\x12\r\n\x05index\x18\x01 \x01(\x03\x12"\n\x06status\x18\x02 \x01(\x0b\x32\x12.google.rpc.Status"\xa9\x02\n\x18\x43heckAndMutateRowRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x07 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12\x37\n\x10predicate_filter\x18\x06 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x34\n\x0etrue_mutations\x18\x04 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\x12\x35\n\x0f\x66\x61lse_mutations\x18\x05 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation"6\n\x19\x43heckAndMutateRowResponse\x12\x19\n\x11predicate_matched\x18\x01 \x01(\x08"\xc1\x01\n\x19ReadModifyWriteRowRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12;\n\x05rules\x18\x03 
\x03(\x0b\x32\'.google.bigtable.v2.ReadModifyWriteRuleB\x03\xe0\x41\x02"B\n\x1aReadModifyWriteRowResponse\x12$\n\x03row\x18\x01 \x01(\x0b\x32\x17.google.bigtable.v2.Row2\xc4\x0e\n\x08\x42igtable\x12\xc6\x01\n\x08ReadRows\x12#.google.bigtable.v2.ReadRowsRequest\x1a$.google.bigtable.v2.ReadRowsResponse"m\x82\xd3\xe4\x93\x02>"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\x01*\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id0\x01\x12\xd7\x01\n\rSampleRowKeys\x12(.google.bigtable.v2.SampleRowKeysRequest\x1a).google.bigtable.v2.SampleRowKeysResponse"o\x82\xd3\xe4\x93\x02@\x12>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id0\x01\x12\xed\x01\n\tMutateRow\x12$.google.bigtable.v2.MutateRowRequest\x1a%.google.bigtable.v2.MutateRowResponse"\x92\x01\x82\xd3\xe4\x93\x02?":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\x01*\xda\x41\x1ctable_name,row_key,mutations\xda\x41+table_name,row_key,mutations,app_profile_id\x12\xde\x01\n\nMutateRows\x12%.google.bigtable.v2.MutateRowsRequest\x1a&.google.bigtable.v2.MutateRowsResponse"\x7f\x82\xd3\xe4\x93\x02@";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\x01*\xda\x41\x12table_name,entries\xda\x41!table_name,entries,app_profile_id0\x01\x12\xd9\x02\n\x11\x43heckAndMutateRow\x12,.google.bigtable.v2.CheckAndMutateRowRequest\x1a-.google.bigtable.v2.CheckAndMutateRowResponse"\xe6\x01\x82\xd3\xe4\x93\x02G"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\x01*\xda\x41\x42table_name,row_key,predicate_filter,true_mutations,false_mutations\xda\x41Qtable_name,row_key,predicate_filter,true_mutations,false_mutations,app_profile_id\x12\x89\x02\n\x12ReadModifyWriteRow\x12-.google.bigtable.v2.ReadModifyWriteRowRequest\x1a..google.bigtable.v2.ReadModifyWriteRowResponse"\x93\x01\x82\xd3\xe4\x93\x02H"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\x01*\xda\x41\x18table_name,row_key,rules\xda\x41\'tab
le_name,row_key,rules,app_profile_id\x1a\xdb\x02\xca\x41\x17\x62igtable.googleapis.com\xd2\x41\xbd\x02https://www.googleapis.com/auth/bigtable.data,https://www.googleapis.com/auth/bigtable.data.readonly,https://www.googleapis.com/auth/cloud-bigtable.data,https://www.googleapis.com/auth/cloud-bigtable.data.readonly,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-onlyB\x93\x02\n\x16\x63om.google.bigtable.v2B\rBigtableProtoP\x01Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2\xea\x02\x1bGoogle::Cloud::Bigtable::V2\xea\x41W\n\x1d\x62igtable.googleapis.com/Table\x12\x36projects/{project}/instances/{instance}/tables/{table}b\x06proto3', - dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - google_dot_api_dot_client__pb2.DESCRIPTOR, - google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, - google_dot_api_dot_resource__pb2.DESCRIPTOR, - google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2.DESCRIPTOR, - google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR, - google_dot_rpc_dot_status__pb2.DESCRIPTOR, - ], -) - - -_READROWSREQUEST = _descriptor.Descriptor( - name="ReadRowsRequest", - full_name="google.bigtable.v2.ReadRowsRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="table_name", - full_name="google.bigtable.v2.ReadRowsRequest.table_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="app_profile_id", - 
full_name="google.bigtable.v2.ReadRowsRequest.app_profile_id", - index=1, - number=5, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="rows", - full_name="google.bigtable.v2.ReadRowsRequest.rows", - index=2, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="filter", - full_name="google.bigtable.v2.ReadRowsRequest.filter", - index=3, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="rows_limit", - full_name="google.bigtable.v2.ReadRowsRequest.rows_limit", - index=4, - number=4, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=285, - serialized_end=494, -) - - -_READROWSRESPONSE_CELLCHUNK = _descriptor.Descriptor( - name="CellChunk", - 
full_name="google.bigtable.v2.ReadRowsResponse.CellChunk", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="row_key", - full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.row_key", - index=0, - number=1, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="family_name", - full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.family_name", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="qualifier", - full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.qualifier", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="timestamp_micros", - full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.timestamp_micros", - index=3, - number=4, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="labels", - 
full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.labels", - index=4, - number=5, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.value", - index=5, - number=6, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value_size", - full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.value_size", - index=6, - number=7, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="reset_row", - full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.reset_row", - index=7, - number=8, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="commit_row", - full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.commit_row", - index=8, - number=9, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - 
extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="row_status", - full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.row_status", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=612, - serialized_end=873, -) - -_READROWSRESPONSE = _descriptor.Descriptor( - name="ReadRowsResponse", - full_name="google.bigtable.v2.ReadRowsResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="chunks", - full_name="google.bigtable.v2.ReadRowsResponse.chunks", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="last_scanned_row_key", - full_name="google.bigtable.v2.ReadRowsResponse.last_scanned_row_key", - index=1, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_READROWSRESPONSE_CELLCHUNK,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=497, - serialized_end=873, -) - - -_SAMPLEROWKEYSREQUEST = _descriptor.Descriptor( - name="SampleRowKeysRequest", - 
full_name="google.bigtable.v2.SampleRowKeysRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="table_name", - full_name="google.bigtable.v2.SampleRowKeysRequest.table_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="app_profile_id", - full_name="google.bigtable.v2.SampleRowKeysRequest.app_profile_id", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=875, - serialized_end=980, -) - - -_SAMPLEROWKEYSRESPONSE = _descriptor.Descriptor( - name="SampleRowKeysResponse", - full_name="google.bigtable.v2.SampleRowKeysResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="row_key", - full_name="google.bigtable.v2.SampleRowKeysResponse.row_key", - index=0, - number=1, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - 
create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="offset_bytes", - full_name="google.bigtable.v2.SampleRowKeysResponse.offset_bytes", - index=1, - number=2, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=982, - serialized_end=1044, -) - - -_MUTATEROWREQUEST = _descriptor.Descriptor( - name="MutateRowRequest", - full_name="google.bigtable.v2.MutateRowRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="table_name", - full_name="google.bigtable.v2.MutateRowRequest.table_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="app_profile_id", - full_name="google.bigtable.v2.MutateRowRequest.app_profile_id", - index=1, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="row_key", - full_name="google.bigtable.v2.MutateRowRequest.row_key", - 
index=2, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="mutations", - full_name="google.bigtable.v2.MutateRowRequest.mutations", - index=3, - number=3, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1047, - serialized_end=1224, -) - - -_MUTATEROWRESPONSE = _descriptor.Descriptor( - name="MutateRowResponse", - full_name="google.bigtable.v2.MutateRowResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1226, - serialized_end=1245, -) - - -_MUTATEROWSREQUEST_ENTRY = _descriptor.Descriptor( - name="Entry", - full_name="google.bigtable.v2.MutateRowsRequest.Entry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="row_key", - full_name="google.bigtable.v2.MutateRowsRequest.Entry.row_key", - index=0, - number=1, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - 
extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="mutations", - full_name="google.bigtable.v2.MutateRowsRequest.Entry.mutations", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1419, - serialized_end=1497, -) - -_MUTATEROWSREQUEST = _descriptor.Descriptor( - name="MutateRowsRequest", - full_name="google.bigtable.v2.MutateRowsRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="table_name", - full_name="google.bigtable.v2.MutateRowsRequest.table_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="app_profile_id", - full_name="google.bigtable.v2.MutateRowsRequest.app_profile_id", - index=1, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( 
- name="entries", - full_name="google.bigtable.v2.MutateRowsRequest.entries", - index=2, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_MUTATEROWSREQUEST_ENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1248, - serialized_end=1497, -) - - -_MUTATEROWSRESPONSE_ENTRY = _descriptor.Descriptor( - name="Entry", - full_name="google.bigtable.v2.MutateRowsResponse.Entry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="index", - full_name="google.bigtable.v2.MutateRowsResponse.Entry.index", - index=0, - number=1, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="status", - full_name="google.bigtable.v2.MutateRowsResponse.Entry.status", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1585, - serialized_end=1643, -) - -_MUTATEROWSRESPONSE = 
_descriptor.Descriptor( - name="MutateRowsResponse", - full_name="google.bigtable.v2.MutateRowsResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="entries", - full_name="google.bigtable.v2.MutateRowsResponse.entries", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_MUTATEROWSRESPONSE_ENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1500, - serialized_end=1643, -) - - -_CHECKANDMUTATEROWREQUEST = _descriptor.Descriptor( - name="CheckAndMutateRowRequest", - full_name="google.bigtable.v2.CheckAndMutateRowRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="table_name", - full_name="google.bigtable.v2.CheckAndMutateRowRequest.table_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="app_profile_id", - full_name="google.bigtable.v2.CheckAndMutateRowRequest.app_profile_id", - index=1, - number=7, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - 
extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="row_key", - full_name="google.bigtable.v2.CheckAndMutateRowRequest.row_key", - index=2, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="predicate_filter", - full_name="google.bigtable.v2.CheckAndMutateRowRequest.predicate_filter", - index=3, - number=6, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="true_mutations", - full_name="google.bigtable.v2.CheckAndMutateRowRequest.true_mutations", - index=4, - number=4, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="false_mutations", - full_name="google.bigtable.v2.CheckAndMutateRowRequest.false_mutations", - index=5, - number=5, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - 
is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1646, - serialized_end=1943, -) - - -_CHECKANDMUTATEROWRESPONSE = _descriptor.Descriptor( - name="CheckAndMutateRowResponse", - full_name="google.bigtable.v2.CheckAndMutateRowResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="predicate_matched", - full_name="google.bigtable.v2.CheckAndMutateRowResponse.predicate_matched", - index=0, - number=1, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1945, - serialized_end=1999, -) - - -_READMODIFYWRITEROWREQUEST = _descriptor.Descriptor( - name="ReadModifyWriteRowRequest", - full_name="google.bigtable.v2.ReadModifyWriteRowRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="table_name", - full_name="google.bigtable.v2.ReadModifyWriteRowRequest.table_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="app_profile_id", - full_name="google.bigtable.v2.ReadModifyWriteRowRequest.app_profile_id", - index=1, - number=4, - 
type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="row_key", - full_name="google.bigtable.v2.ReadModifyWriteRowRequest.row_key", - index=2, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="rules", - full_name="google.bigtable.v2.ReadModifyWriteRowRequest.rules", - index=3, - number=3, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2002, - serialized_end=2195, -) - - -_READMODIFYWRITEROWRESPONSE = _descriptor.Descriptor( - name="ReadModifyWriteRowResponse", - full_name="google.bigtable.v2.ReadModifyWriteRowResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="row", - full_name="google.bigtable.v2.ReadModifyWriteRowResponse.row", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - 
serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2197, - serialized_end=2263, -) - -_READROWSREQUEST.fields_by_name[ - "rows" -].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._ROWSET -_READROWSREQUEST.fields_by_name[ - "filter" -].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._ROWFILTER -_READROWSRESPONSE_CELLCHUNK.fields_by_name[ - "family_name" -].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE -_READROWSRESPONSE_CELLCHUNK.fields_by_name[ - "qualifier" -].message_type = google_dot_protobuf_dot_wrappers__pb2._BYTESVALUE -_READROWSRESPONSE_CELLCHUNK.containing_type = _READROWSRESPONSE -_READROWSRESPONSE_CELLCHUNK.oneofs_by_name["row_status"].fields.append( - _READROWSRESPONSE_CELLCHUNK.fields_by_name["reset_row"] -) -_READROWSRESPONSE_CELLCHUNK.fields_by_name[ - "reset_row" -].containing_oneof = _READROWSRESPONSE_CELLCHUNK.oneofs_by_name["row_status"] -_READROWSRESPONSE_CELLCHUNK.oneofs_by_name["row_status"].fields.append( - _READROWSRESPONSE_CELLCHUNK.fields_by_name["commit_row"] -) -_READROWSRESPONSE_CELLCHUNK.fields_by_name[ - "commit_row" -].containing_oneof = _READROWSRESPONSE_CELLCHUNK.oneofs_by_name["row_status"] -_READROWSRESPONSE.fields_by_name["chunks"].message_type = _READROWSRESPONSE_CELLCHUNK -_MUTATEROWREQUEST.fields_by_name[ - "mutations" -].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._MUTATION -_MUTATEROWSREQUEST_ENTRY.fields_by_name[ - "mutations" -].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._MUTATION -_MUTATEROWSREQUEST_ENTRY.containing_type = _MUTATEROWSREQUEST -_MUTATEROWSREQUEST.fields_by_name["entries"].message_type = _MUTATEROWSREQUEST_ENTRY -_MUTATEROWSRESPONSE_ENTRY.fields_by_name[ - 
"status" -].message_type = google_dot_rpc_dot_status__pb2._STATUS -_MUTATEROWSRESPONSE_ENTRY.containing_type = _MUTATEROWSRESPONSE -_MUTATEROWSRESPONSE.fields_by_name["entries"].message_type = _MUTATEROWSRESPONSE_ENTRY -_CHECKANDMUTATEROWREQUEST.fields_by_name[ - "predicate_filter" -].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._ROWFILTER -_CHECKANDMUTATEROWREQUEST.fields_by_name[ - "true_mutations" -].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._MUTATION -_CHECKANDMUTATEROWREQUEST.fields_by_name[ - "false_mutations" -].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._MUTATION -_READMODIFYWRITEROWREQUEST.fields_by_name[ - "rules" -].message_type = ( - google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._READMODIFYWRITERULE -) -_READMODIFYWRITEROWRESPONSE.fields_by_name[ - "row" -].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._ROW -DESCRIPTOR.message_types_by_name["ReadRowsRequest"] = _READROWSREQUEST -DESCRIPTOR.message_types_by_name["ReadRowsResponse"] = _READROWSRESPONSE -DESCRIPTOR.message_types_by_name["SampleRowKeysRequest"] = _SAMPLEROWKEYSREQUEST -DESCRIPTOR.message_types_by_name["SampleRowKeysResponse"] = _SAMPLEROWKEYSRESPONSE -DESCRIPTOR.message_types_by_name["MutateRowRequest"] = _MUTATEROWREQUEST -DESCRIPTOR.message_types_by_name["MutateRowResponse"] = _MUTATEROWRESPONSE -DESCRIPTOR.message_types_by_name["MutateRowsRequest"] = _MUTATEROWSREQUEST -DESCRIPTOR.message_types_by_name["MutateRowsResponse"] = _MUTATEROWSRESPONSE -DESCRIPTOR.message_types_by_name["CheckAndMutateRowRequest"] = _CHECKANDMUTATEROWREQUEST -DESCRIPTOR.message_types_by_name[ - "CheckAndMutateRowResponse" -] = _CHECKANDMUTATEROWRESPONSE -DESCRIPTOR.message_types_by_name[ - "ReadModifyWriteRowRequest" -] = _READMODIFYWRITEROWREQUEST -DESCRIPTOR.message_types_by_name[ - "ReadModifyWriteRowResponse" -] = _READMODIFYWRITEROWRESPONSE 
-_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -ReadRowsRequest = _reflection.GeneratedProtocolMessageType( - "ReadRowsRequest", - (_message.Message,), - { - "DESCRIPTOR": _READROWSREQUEST, - "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", - "__doc__": """Request message for Bigtable.ReadRows. - - Attributes: - table_name: - Required. The unique name of the table from which to read. - Values are of the form - ``projects//instances//tables/
``. - app_profile_id: - This value specifies routing for replication. If not - specified, the “default” application profile will be used. - rows: - The row keys and/or ranges to read. If not specified, reads - from all rows. - filter: - The filter to apply to the contents of the specified row(s). - If unset, reads the entirety of each row. - rows_limit: - The read will terminate after committing to N rows’ worth of - results. The default (zero) is to return all results. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadRowsRequest) - }, -) -_sym_db.RegisterMessage(ReadRowsRequest) - -ReadRowsResponse = _reflection.GeneratedProtocolMessageType( - "ReadRowsResponse", - (_message.Message,), - { - "CellChunk": _reflection.GeneratedProtocolMessageType( - "CellChunk", - (_message.Message,), - { - "DESCRIPTOR": _READROWSRESPONSE_CELLCHUNK, - "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", - "__doc__": """Specifies a piece of a row’s contents returned as part of the read - response stream. - - Attributes: - row_key: - The row key for this chunk of data. If the row key is empty, - this CellChunk is a continuation of the same row as the - previous CellChunk in the response stream, even if that - CellChunk was in a previous ReadRowsResponse message. - family_name: - The column family name for this chunk of data. If this message - is not present this CellChunk is a continuation of the same - column family as the previous CellChunk. The empty string can - occur as a column family name in a response so clients must - check explicitly for the presence of this message, not just - for ``family_name.value`` being non-empty. - qualifier: - The column qualifier for this chunk of data. If this message - is not present, this CellChunk is a continuation of the same - column as the previous CellChunk. Column qualifiers may be - empty so clients must check for the presence of this message, - not just for ``qualifier.value`` being non-empty. 
- timestamp_micros: - The cell’s stored timestamp, which also uniquely identifies it - within its column. Values are always expressed in - microseconds, but individual tables may set a coarser - granularity to further restrict the allowed values. For - example, a table which specifies millisecond granularity will - only allow values of ``timestamp_micros`` which are multiples - of 1000. Timestamps are only set in the first CellChunk per - cell (for cells split into multiple chunks). - labels: - Labels applied to the cell by a - [RowFilter][google.bigtable.v2.RowFilter]. Labels are only set - on the first CellChunk per cell. - value: - The value stored in the cell. Cell values can be split across - multiple CellChunks. In that case only the value field will be - set in CellChunks after the first: the timestamp and labels - will only be present in the first CellChunk, even if the first - CellChunk came in a previous ReadRowsResponse. - value_size: - If this CellChunk is part of a chunked cell value and this is - not the final chunk of that cell, value_size will be set to - the total length of the cell value. The client can use this - size to pre-allocate memory to hold the full cell value. - row_status: - Signals to the client concerning previous CellChunks received. - reset_row: - Indicates that the client should drop all previous chunks for - ``row_key``, as it will be re-read from the beginning. - commit_row: - Indicates that the client can safely process all previous - chunks for ``row_key``, as its data has been fully read. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadRowsResponse.CellChunk) - }, - ), - "DESCRIPTOR": _READROWSRESPONSE, - "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", - "__doc__": """Response message for Bigtable.ReadRows. - - Attributes: - chunks: - A collection of a row’s contents as part of the read request. 
- last_scanned_row_key: - Optionally the server might return the row key of the last row - it has scanned. The client can use this to construct a more - efficient retry request if needed: any row keys or portions of - ranges less than this row key can be dropped from the request. - This is primarily useful for cases where the server has read a - lot of data that was filtered out since the last committed row - key, allowing the client to skip that work on a retry. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadRowsResponse) - }, -) -_sym_db.RegisterMessage(ReadRowsResponse) -_sym_db.RegisterMessage(ReadRowsResponse.CellChunk) - -SampleRowKeysRequest = _reflection.GeneratedProtocolMessageType( - "SampleRowKeysRequest", - (_message.Message,), - { - "DESCRIPTOR": _SAMPLEROWKEYSREQUEST, - "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", - "__doc__": """Request message for Bigtable.SampleRowKeys. - - Attributes: - table_name: - Required. The unique name of the table from which to sample - row keys. Values are of the form - ``projects//instances//tables/
``. - app_profile_id: - This value specifies routing for replication. If not - specified, the “default” application profile will be used. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.SampleRowKeysRequest) - }, -) -_sym_db.RegisterMessage(SampleRowKeysRequest) - -SampleRowKeysResponse = _reflection.GeneratedProtocolMessageType( - "SampleRowKeysResponse", - (_message.Message,), - { - "DESCRIPTOR": _SAMPLEROWKEYSRESPONSE, - "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", - "__doc__": """Response message for Bigtable.SampleRowKeys. - - Attributes: - row_key: - Sorted streamed sequence of sample row keys in the table. The - table might have contents before the first row key in the list - and after the last one, but a key containing the empty string - indicates “end of table” and will be the last response given, - if present. Note that row keys in this list may not have ever - been written to or read from, and users should therefore not - make any assumptions about the row key structure that are - specific to their use case. - offset_bytes: - Approximate total storage space used by all rows in the table - which precede ``row_key``. Buffering the contents of all rows - between two subsequent samples would require space roughly - equal to the difference in their ``offset_bytes`` fields. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.SampleRowKeysResponse) - }, -) -_sym_db.RegisterMessage(SampleRowKeysResponse) - -MutateRowRequest = _reflection.GeneratedProtocolMessageType( - "MutateRowRequest", - (_message.Message,), - { - "DESCRIPTOR": _MUTATEROWREQUEST, - "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", - "__doc__": """Request message for Bigtable.MutateRow. - - Attributes: - table_name: - Required. The unique name of the table to which the mutation - should be applied. Values are of the form - ``projects//instances//tables/
``. - app_profile_id: - This value specifies routing for replication. If not - specified, the “default” application profile will be used. - row_key: - Required. The key of the row to which the mutation should be - applied. - mutations: - Required. Changes to be atomically applied to the specified - row. Entries are applied in order, meaning that earlier - mutations can be masked by later ones. Must contain at least - one entry and at most 100000. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowRequest) - }, -) -_sym_db.RegisterMessage(MutateRowRequest) - -MutateRowResponse = _reflection.GeneratedProtocolMessageType( - "MutateRowResponse", - (_message.Message,), - { - "DESCRIPTOR": _MUTATEROWRESPONSE, - "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", - "__doc__": """Response message for Bigtable.MutateRow.""", - # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowResponse) - }, -) -_sym_db.RegisterMessage(MutateRowResponse) - -MutateRowsRequest = _reflection.GeneratedProtocolMessageType( - "MutateRowsRequest", - (_message.Message,), - { - "Entry": _reflection.GeneratedProtocolMessageType( - "Entry", - (_message.Message,), - { - "DESCRIPTOR": _MUTATEROWSREQUEST_ENTRY, - "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", - "__doc__": """A mutation for a given row. - - Attributes: - row_key: - The key of the row to which the ``mutations`` should be - applied. - mutations: - Required. Changes to be atomically applied to the specified - row. Mutations are applied in order, meaning that earlier - mutations can be masked by later ones. You must specify at - least one mutation. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsRequest.Entry) - }, - ), - "DESCRIPTOR": _MUTATEROWSREQUEST, - "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", - "__doc__": """Request message for BigtableService.MutateRows. - - Attributes: - table_name: - Required. 
The unique name of the table to which the mutations - should be applied. - app_profile_id: - This value specifies routing for replication. If not - specified, the “default” application profile will be used. - entries: - Required. The row keys and corresponding mutations to be - applied in bulk. Each entry is applied as an atomic mutation, - but the entries may be applied in arbitrary order (even - between entries for the same row). At least one entry must be - specified, and in total the entries can contain at most 100000 - mutations. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsRequest) - }, -) -_sym_db.RegisterMessage(MutateRowsRequest) -_sym_db.RegisterMessage(MutateRowsRequest.Entry) - -MutateRowsResponse = _reflection.GeneratedProtocolMessageType( - "MutateRowsResponse", - (_message.Message,), - { - "Entry": _reflection.GeneratedProtocolMessageType( - "Entry", - (_message.Message,), - { - "DESCRIPTOR": _MUTATEROWSRESPONSE_ENTRY, - "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", - "__doc__": """The result of applying a passed mutation in the original request. - - Attributes: - index: - The index into the original request’s ``entries`` list of the - Entry for which a result is being reported. - status: - The result of the request Entry identified by ``index``. - Depending on how requests are batched during execution, it is - possible for one Entry to fail due to an error with another - Entry. In the event that this occurs, the same error will be - reported for both entries. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsResponse.Entry) - }, - ), - "DESCRIPTOR": _MUTATEROWSRESPONSE, - "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", - "__doc__": """Response message for BigtableService.MutateRows. - - Attributes: - entries: - One or more results for Entries from the batch request. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsResponse) - }, -) -_sym_db.RegisterMessage(MutateRowsResponse) -_sym_db.RegisterMessage(MutateRowsResponse.Entry) - -CheckAndMutateRowRequest = _reflection.GeneratedProtocolMessageType( - "CheckAndMutateRowRequest", - (_message.Message,), - { - "DESCRIPTOR": _CHECKANDMUTATEROWREQUEST, - "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", - "__doc__": """Request message for Bigtable.CheckAndMutateRow. - - Attributes: - table_name: - Required. The unique name of the table to which the - conditional mutation should be applied. Values are of the form - ``projects//instances//tables/
``. - app_profile_id: - This value specifies routing for replication. If not - specified, the “default” application profile will be used. - row_key: - Required. The key of the row to which the conditional mutation - should be applied. - predicate_filter: - The filter to be applied to the contents of the specified row. - Depending on whether or not any results are yielded, either - ``true_mutations`` or ``false_mutations`` will be executed. If - unset, checks that the row contains any values at all. - true_mutations: - Changes to be atomically applied to the specified row if - ``predicate_filter`` yields at least one cell when applied to - ``row_key``. Entries are applied in order, meaning that - earlier mutations can be masked by later ones. Must contain at - least one entry if ``false_mutations`` is empty, and at most - 100000. - false_mutations: - Changes to be atomically applied to the specified row if - ``predicate_filter`` does not yield any cells when applied to - ``row_key``. Entries are applied in order, meaning that - earlier mutations can be masked by later ones. Must contain at - least one entry if ``true_mutations`` is empty, and at most - 100000. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.CheckAndMutateRowRequest) - }, -) -_sym_db.RegisterMessage(CheckAndMutateRowRequest) - -CheckAndMutateRowResponse = _reflection.GeneratedProtocolMessageType( - "CheckAndMutateRowResponse", - (_message.Message,), - { - "DESCRIPTOR": _CHECKANDMUTATEROWRESPONSE, - "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", - "__doc__": """Response message for Bigtable.CheckAndMutateRow. - - Attributes: - predicate_matched: - Whether or not the request’s ``predicate_filter`` yielded any - results for the specified row. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.CheckAndMutateRowResponse) - }, -) -_sym_db.RegisterMessage(CheckAndMutateRowResponse) - -ReadModifyWriteRowRequest = _reflection.GeneratedProtocolMessageType( - "ReadModifyWriteRowRequest", - (_message.Message,), - { - "DESCRIPTOR": _READMODIFYWRITEROWREQUEST, - "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", - "__doc__": """Request message for Bigtable.ReadModifyWriteRow. - - Attributes: - table_name: - Required. The unique name of the table to which the - read/modify/write rules should be applied. Values are of the - form - ``projects//instances//tables/
``. - app_profile_id: - This value specifies routing for replication. If not - specified, the “default” application profile will be used. - row_key: - Required. The key of the row to which the read/modify/write - rules should be applied. - rules: - Required. Rules specifying how the specified row’s contents - are to be transformed into writes. Entries are applied in - order, meaning that earlier rules will affect the results of - later ones. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadModifyWriteRowRequest) - }, -) -_sym_db.RegisterMessage(ReadModifyWriteRowRequest) - -ReadModifyWriteRowResponse = _reflection.GeneratedProtocolMessageType( - "ReadModifyWriteRowResponse", - (_message.Message,), - { - "DESCRIPTOR": _READMODIFYWRITEROWRESPONSE, - "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", - "__doc__": """Response message for Bigtable.ReadModifyWriteRow. - - Attributes: - row: - A Row containing the new contents of all cells modified by the - request. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadModifyWriteRowResponse) - }, -) -_sym_db.RegisterMessage(ReadModifyWriteRowResponse) - - -DESCRIPTOR._options = None -_READROWSREQUEST.fields_by_name["table_name"]._options = None -_SAMPLEROWKEYSREQUEST.fields_by_name["table_name"]._options = None -_MUTATEROWREQUEST.fields_by_name["table_name"]._options = None -_MUTATEROWREQUEST.fields_by_name["row_key"]._options = None -_MUTATEROWREQUEST.fields_by_name["mutations"]._options = None -_MUTATEROWSREQUEST_ENTRY.fields_by_name["mutations"]._options = None -_MUTATEROWSREQUEST.fields_by_name["table_name"]._options = None -_MUTATEROWSREQUEST.fields_by_name["entries"]._options = None -_CHECKANDMUTATEROWREQUEST.fields_by_name["table_name"]._options = None -_CHECKANDMUTATEROWREQUEST.fields_by_name["row_key"]._options = None -_READMODIFYWRITEROWREQUEST.fields_by_name["table_name"]._options = None -_READMODIFYWRITEROWREQUEST.fields_by_name["row_key"]._options = None -_READMODIFYWRITEROWREQUEST.fields_by_name["rules"]._options = None - -_BIGTABLE = _descriptor.ServiceDescriptor( - name="Bigtable", - full_name="google.bigtable.v2.Bigtable", - file=DESCRIPTOR, - index=0, - serialized_options=b"\312A\027bigtable.googleapis.com\322A\275\002https://www.googleapis.com/auth/bigtable.data,https://www.googleapis.com/auth/bigtable.data.readonly,https://www.googleapis.com/auth/cloud-bigtable.data,https://www.googleapis.com/auth/cloud-bigtable.data.readonly,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-only", - create_key=_descriptor._internal_create_key, - serialized_start=2266, - serialized_end=4126, - methods=[ - _descriptor.MethodDescriptor( - name="ReadRows", - full_name="google.bigtable.v2.Bigtable.ReadRows", - index=0, - containing_service=None, - input_type=_READROWSREQUEST, - output_type=_READROWSRESPONSE, - 
serialized_options=b'\202\323\344\223\002>"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\001*\332A\ntable_name\332A\031table_name,app_profile_id', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="SampleRowKeys", - full_name="google.bigtable.v2.Bigtable.SampleRowKeys", - index=1, - containing_service=None, - input_type=_SAMPLEROWKEYSREQUEST, - output_type=_SAMPLEROWKEYSRESPONSE, - serialized_options=b"\202\323\344\223\002@\022>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys\332A\ntable_name\332A\031table_name,app_profile_id", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="MutateRow", - full_name="google.bigtable.v2.Bigtable.MutateRow", - index=2, - containing_service=None, - input_type=_MUTATEROWREQUEST, - output_type=_MUTATEROWRESPONSE, - serialized_options=b'\202\323\344\223\002?":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\001*\332A\034table_name,row_key,mutations\332A+table_name,row_key,mutations,app_profile_id', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="MutateRows", - full_name="google.bigtable.v2.Bigtable.MutateRows", - index=3, - containing_service=None, - input_type=_MUTATEROWSREQUEST, - output_type=_MUTATEROWSRESPONSE, - serialized_options=b'\202\323\344\223\002@";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\001*\332A\022table_name,entries\332A!table_name,entries,app_profile_id', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="CheckAndMutateRow", - full_name="google.bigtable.v2.Bigtable.CheckAndMutateRow", - index=4, - containing_service=None, - input_type=_CHECKANDMUTATEROWREQUEST, - output_type=_CHECKANDMUTATEROWRESPONSE, - 
serialized_options=b'\202\323\344\223\002G"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\001*\332ABtable_name,row_key,predicate_filter,true_mutations,false_mutations\332AQtable_name,row_key,predicate_filter,true_mutations,false_mutations,app_profile_id', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="ReadModifyWriteRow", - full_name="google.bigtable.v2.Bigtable.ReadModifyWriteRow", - index=5, - containing_service=None, - input_type=_READMODIFYWRITEROWREQUEST, - output_type=_READMODIFYWRITEROWRESPONSE, - serialized_options=b"\202\323\344\223\002H\"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\001*\332A\030table_name,row_key,rules\332A'table_name,row_key,rules,app_profile_id", - create_key=_descriptor._internal_create_key, - ), - ], -) -_sym_db.RegisterServiceDescriptor(_BIGTABLE) - -DESCRIPTOR.services_by_name["Bigtable"] = _BIGTABLE - -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py b/google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py deleted file mode 100644 index a4f25dcb0..000000000 --- a/google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py +++ /dev/null @@ -1,329 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc - -from google.cloud.bigtable_v2.proto import ( - bigtable_pb2 as google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2, -) - - -class BigtableStub(object): - """Service for reading from and writing to existing Bigtable tables. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. 
- """ - self.ReadRows = channel.unary_stream( - "/google.bigtable.v2.Bigtable/ReadRows", - request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsResponse.FromString, - ) - self.SampleRowKeys = channel.unary_stream( - "/google.bigtable.v2.Bigtable/SampleRowKeys", - request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.SampleRowKeysRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.SampleRowKeysResponse.FromString, - ) - self.MutateRow = channel.unary_unary( - "/google.bigtable.v2.Bigtable/MutateRow", - request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowResponse.FromString, - ) - self.MutateRows = channel.unary_stream( - "/google.bigtable.v2.Bigtable/MutateRows", - request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowsRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowsResponse.FromString, - ) - self.CheckAndMutateRow = channel.unary_unary( - "/google.bigtable.v2.Bigtable/CheckAndMutateRow", - request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.CheckAndMutateRowRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.CheckAndMutateRowResponse.FromString, - ) - self.ReadModifyWriteRow = channel.unary_unary( - "/google.bigtable.v2.Bigtable/ReadModifyWriteRow", - request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadModifyWriteRowRequest.SerializeToString, - 
response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadModifyWriteRowResponse.FromString, - ) - - -class BigtableServicer(object): - """Service for reading from and writing to existing Bigtable tables. - """ - - def ReadRows(self, request, context): - """Streams back the contents of all requested rows in key order, optionally - applying the same Reader filter to each. Depending on their size, - rows and cells may be broken up across multiple responses, but - atomicity of each row will still be preserved. See the - ReadRowsResponse documentation for details. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def SampleRowKeys(self, request, context): - """Returns a sample of row keys in the table. The returned row keys will - delimit contiguous sections of the table of approximately equal size, - which can be used to break up the data for distributed tasks like - mapreduces. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def MutateRow(self, request, context): - """Mutates a row atomically. Cells already present in the row are left - unchanged unless explicitly changed by `mutation`. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def MutateRows(self, request, context): - """Mutates multiple rows in a batch. Each individual row is mutated - atomically as in MutateRow, but the entire batch is not executed - atomically. 
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def CheckAndMutateRow(self, request, context): - """Mutates a row atomically based on the output of a predicate Reader filter. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ReadModifyWriteRow(self, request, context): - """Modifies a row atomically on the server. The method reads the latest - existing timestamp and value from the specified columns and writes a new - entry based on pre-defined read/modify/write rules. The new value for the - timestamp is the greater of the existing timestamp or the current server - time. The method returns the new contents of all modified cells. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - -def add_BigtableServicer_to_server(servicer, server): - rpc_method_handlers = { - "ReadRows": grpc.unary_stream_rpc_method_handler( - servicer.ReadRows, - request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsResponse.SerializeToString, - ), - "SampleRowKeys": grpc.unary_stream_rpc_method_handler( - servicer.SampleRowKeys, - request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.SampleRowKeysRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.SampleRowKeysResponse.SerializeToString, - ), - "MutateRow": grpc.unary_unary_rpc_method_handler( - servicer.MutateRow, - request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowRequest.FromString, - 
response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowResponse.SerializeToString, - ), - "MutateRows": grpc.unary_stream_rpc_method_handler( - servicer.MutateRows, - request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowsRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowsResponse.SerializeToString, - ), - "CheckAndMutateRow": grpc.unary_unary_rpc_method_handler( - servicer.CheckAndMutateRow, - request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.CheckAndMutateRowRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.CheckAndMutateRowResponse.SerializeToString, - ), - "ReadModifyWriteRow": grpc.unary_unary_rpc_method_handler( - servicer.ReadModifyWriteRow, - request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadModifyWriteRowRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadModifyWriteRowResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - "google.bigtable.v2.Bigtable", rpc_method_handlers - ) - server.add_generic_rpc_handlers((generic_handler,)) - - -# This class is part of an EXPERIMENTAL API. -class Bigtable(object): - """Service for reading from and writing to existing Bigtable tables. 
- """ - - @staticmethod - def ReadRows( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_stream( - request, - target, - "/google.bigtable.v2.Bigtable/ReadRows", - google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsRequest.SerializeToString, - google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def SampleRowKeys( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_stream( - request, - target, - "/google.bigtable.v2.Bigtable/SampleRowKeys", - google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.SampleRowKeysRequest.SerializeToString, - google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.SampleRowKeysResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def MutateRow( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.v2.Bigtable/MutateRow", - google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowRequest.SerializeToString, - google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def MutateRows( - 
request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_stream( - request, - target, - "/google.bigtable.v2.Bigtable/MutateRows", - google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowsRequest.SerializeToString, - google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowsResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def CheckAndMutateRow( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.v2.Bigtable/CheckAndMutateRow", - google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.CheckAndMutateRowRequest.SerializeToString, - google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.CheckAndMutateRowResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def ReadModifyWriteRow( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.v2.Bigtable/ReadModifyWriteRow", - google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadModifyWriteRowRequest.SerializeToString, - google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadModifyWriteRowResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) diff --git 
a/google/cloud/bigtable_v2/proto/data_pb2.py b/google/cloud/bigtable_v2/proto/data_pb2.py deleted file mode 100644 index a64f9b10e..000000000 --- a/google/cloud/bigtable_v2/proto/data_pb2.py +++ /dev/null @@ -1,2668 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/bigtable_v2/proto/data.proto -"""Generated protocol buffer code.""" -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/bigtable_v2/proto/data.proto", - package="google.bigtable.v2", - syntax="proto3", - serialized_options=b"\n\026com.google.bigtable.v2B\tDataProtoP\001Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2\352\002\033Google::Cloud::Bigtable::V2", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n)google/cloud/bigtable_v2/proto/data.proto\x12\x12google.bigtable.v2"@\n\x03Row\x12\x0b\n\x03key\x18\x01 \x01(\x0c\x12,\n\x08\x66\x61milies\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Family"C\n\x06\x46\x61mily\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x07\x63olumns\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Column"D\n\x06\x43olumn\x12\x11\n\tqualifier\x18\x01 \x01(\x0c\x12\'\n\x05\x63\x65lls\x18\x02 \x03(\x0b\x32\x18.google.bigtable.v2.Cell"?\n\x04\x43\x65ll\x12\x18\n\x10timestamp_micros\x18\x01 \x01(\x03\x12\r\n\x05value\x18\x02 \x01(\x0c\x12\x0e\n\x06labels\x18\x03 \x03(\t"\x8a\x01\n\x08RowRange\x12\x1a\n\x10start_key_closed\x18\x01 \x01(\x0cH\x00\x12\x18\n\x0estart_key_open\x18\x02 \x01(\x0cH\x00\x12\x16\n\x0c\x65nd_key_open\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_key_closed\x18\x04 
\x01(\x0cH\x01\x42\x0b\n\tstart_keyB\t\n\x07\x65nd_key"L\n\x06RowSet\x12\x10\n\x08row_keys\x18\x01 \x03(\x0c\x12\x30\n\nrow_ranges\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.RowRange"\xc6\x01\n\x0b\x43olumnRange\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12 \n\x16start_qualifier_closed\x18\x02 \x01(\x0cH\x00\x12\x1e\n\x14start_qualifier_open\x18\x03 \x01(\x0cH\x00\x12\x1e\n\x14\x65nd_qualifier_closed\x18\x04 \x01(\x0cH\x01\x12\x1c\n\x12\x65nd_qualifier_open\x18\x05 \x01(\x0cH\x01\x42\x11\n\x0fstart_qualifierB\x0f\n\rend_qualifier"N\n\x0eTimestampRange\x12\x1e\n\x16start_timestamp_micros\x18\x01 \x01(\x03\x12\x1c\n\x14\x65nd_timestamp_micros\x18\x02 \x01(\x03"\x98\x01\n\nValueRange\x12\x1c\n\x12start_value_closed\x18\x01 \x01(\x0cH\x00\x12\x1a\n\x10start_value_open\x18\x02 \x01(\x0cH\x00\x12\x1a\n\x10\x65nd_value_closed\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_value_open\x18\x04 \x01(\x0cH\x01\x42\r\n\x0bstart_valueB\x0b\n\tend_value"\xdf\x08\n\tRowFilter\x12\x34\n\x05\x63hain\x18\x01 \x01(\x0b\x32#.google.bigtable.v2.RowFilter.ChainH\x00\x12>\n\ninterleave\x18\x02 \x01(\x0b\x32(.google.bigtable.v2.RowFilter.InterleaveH\x00\x12<\n\tcondition\x18\x03 \x01(\x0b\x32\'.google.bigtable.v2.RowFilter.ConditionH\x00\x12\x0e\n\x04sink\x18\x10 \x01(\x08H\x00\x12\x19\n\x0fpass_all_filter\x18\x11 \x01(\x08H\x00\x12\x1a\n\x10\x62lock_all_filter\x18\x12 \x01(\x08H\x00\x12\x1e\n\x14row_key_regex_filter\x18\x04 \x01(\x0cH\x00\x12\x1b\n\x11row_sample_filter\x18\x0e \x01(\x01H\x00\x12"\n\x18\x66\x61mily_name_regex_filter\x18\x05 \x01(\tH\x00\x12\'\n\x1d\x63olumn_qualifier_regex_filter\x18\x06 \x01(\x0cH\x00\x12>\n\x13\x63olumn_range_filter\x18\x07 \x01(\x0b\x32\x1f.google.bigtable.v2.ColumnRangeH\x00\x12\x44\n\x16timestamp_range_filter\x18\x08 \x01(\x0b\x32".google.bigtable.v2.TimestampRangeH\x00\x12\x1c\n\x12value_regex_filter\x18\t \x01(\x0cH\x00\x12<\n\x12value_range_filter\x18\x0f 
\x01(\x0b\x32\x1e.google.bigtable.v2.ValueRangeH\x00\x12%\n\x1b\x63\x65lls_per_row_offset_filter\x18\n \x01(\x05H\x00\x12$\n\x1a\x63\x65lls_per_row_limit_filter\x18\x0b \x01(\x05H\x00\x12\'\n\x1d\x63\x65lls_per_column_limit_filter\x18\x0c \x01(\x05H\x00\x12!\n\x17strip_value_transformer\x18\r \x01(\x08H\x00\x12!\n\x17\x61pply_label_transformer\x18\x13 \x01(\tH\x00\x1a\x37\n\x05\x43hain\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a<\n\nInterleave\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a\xad\x01\n\tCondition\x12\x37\n\x10predicate_filter\x18\x01 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x32\n\x0btrue_filter\x18\x02 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x33\n\x0c\x66\x61lse_filter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilterB\x08\n\x06\x66ilter"\xc9\x04\n\x08Mutation\x12\x38\n\x08set_cell\x18\x01 \x01(\x0b\x32$.google.bigtable.v2.Mutation.SetCellH\x00\x12K\n\x12\x64\x65lete_from_column\x18\x02 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromColumnH\x00\x12K\n\x12\x64\x65lete_from_family\x18\x03 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromFamilyH\x00\x12\x45\n\x0f\x64\x65lete_from_row\x18\x04 \x01(\x0b\x32*.google.bigtable.v2.Mutation.DeleteFromRowH\x00\x1a\x61\n\x07SetCell\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x18\n\x10timestamp_micros\x18\x03 \x01(\x03\x12\r\n\x05value\x18\x04 \x01(\x0c\x1ay\n\x10\x44\x65leteFromColumn\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x36\n\ntime_range\x18\x03 \x01(\x0b\x32".google.bigtable.v2.TimestampRange\x1a\'\n\x10\x44\x65leteFromFamily\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x1a\x0f\n\rDeleteFromRowB\n\n\x08mutation"\x80\x01\n\x13ReadModifyWriteRule\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x16\n\x0c\x61ppend_value\x18\x03 
\x01(\x0cH\x00\x12\x1a\n\x10increment_amount\x18\x04 \x01(\x03H\x00\x42\x06\n\x04ruleB\xb5\x01\n\x16\x63om.google.bigtable.v2B\tDataProtoP\x01Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2\xea\x02\x1bGoogle::Cloud::Bigtable::V2b\x06proto3', -) - - -_ROW = _descriptor.Descriptor( - name="Row", - full_name="google.bigtable.v2.Row", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.bigtable.v2.Row.key", - index=0, - number=1, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="families", - full_name="google.bigtable.v2.Row.families", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=65, - serialized_end=129, -) - - -_FAMILY = _descriptor.Descriptor( - name="Family", - full_name="google.bigtable.v2.Family", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.v2.Family.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - 
message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="columns", - full_name="google.bigtable.v2.Family.columns", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=131, - serialized_end=198, -) - - -_COLUMN = _descriptor.Descriptor( - name="Column", - full_name="google.bigtable.v2.Column", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="qualifier", - full_name="google.bigtable.v2.Column.qualifier", - index=0, - number=1, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cells", - full_name="google.bigtable.v2.Column.cells", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - 
extension_ranges=[], - oneofs=[], - serialized_start=200, - serialized_end=268, -) - - -_CELL = _descriptor.Descriptor( - name="Cell", - full_name="google.bigtable.v2.Cell", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="timestamp_micros", - full_name="google.bigtable.v2.Cell.timestamp_micros", - index=0, - number=1, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.bigtable.v2.Cell.value", - index=1, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="labels", - full_name="google.bigtable.v2.Cell.labels", - index=2, - number=3, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=270, - serialized_end=333, -) - - -_ROWRANGE = _descriptor.Descriptor( - name="RowRange", - full_name="google.bigtable.v2.RowRange", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - 
name="start_key_closed", - full_name="google.bigtable.v2.RowRange.start_key_closed", - index=0, - number=1, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="start_key_open", - full_name="google.bigtable.v2.RowRange.start_key_open", - index=1, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="end_key_open", - full_name="google.bigtable.v2.RowRange.end_key_open", - index=2, - number=3, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="end_key_closed", - full_name="google.bigtable.v2.RowRange.end_key_closed", - index=3, - number=4, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="start_key", - full_name="google.bigtable.v2.RowRange.start_key", - index=0, - containing_type=None, - 
create_key=_descriptor._internal_create_key, - fields=[], - ), - _descriptor.OneofDescriptor( - name="end_key", - full_name="google.bigtable.v2.RowRange.end_key", - index=1, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=336, - serialized_end=474, -) - - -_ROWSET = _descriptor.Descriptor( - name="RowSet", - full_name="google.bigtable.v2.RowSet", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="row_keys", - full_name="google.bigtable.v2.RowSet.row_keys", - index=0, - number=1, - type=12, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="row_ranges", - full_name="google.bigtable.v2.RowSet.row_ranges", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=476, - serialized_end=552, -) - - -_COLUMNRANGE = _descriptor.Descriptor( - name="ColumnRange", - full_name="google.bigtable.v2.ColumnRange", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="family_name", - full_name="google.bigtable.v2.ColumnRange.family_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - 
has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="start_qualifier_closed", - full_name="google.bigtable.v2.ColumnRange.start_qualifier_closed", - index=1, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="start_qualifier_open", - full_name="google.bigtable.v2.ColumnRange.start_qualifier_open", - index=2, - number=3, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="end_qualifier_closed", - full_name="google.bigtable.v2.ColumnRange.end_qualifier_closed", - index=3, - number=4, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="end_qualifier_open", - full_name="google.bigtable.v2.ColumnRange.end_qualifier_open", - index=4, - number=5, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - 
create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="start_qualifier", - full_name="google.bigtable.v2.ColumnRange.start_qualifier", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - _descriptor.OneofDescriptor( - name="end_qualifier", - full_name="google.bigtable.v2.ColumnRange.end_qualifier", - index=1, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=555, - serialized_end=753, -) - - -_TIMESTAMPRANGE = _descriptor.Descriptor( - name="TimestampRange", - full_name="google.bigtable.v2.TimestampRange", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="start_timestamp_micros", - full_name="google.bigtable.v2.TimestampRange.start_timestamp_micros", - index=0, - number=1, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="end_timestamp_micros", - full_name="google.bigtable.v2.TimestampRange.end_timestamp_micros", - index=1, - number=2, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - 
serialized_start=755, - serialized_end=833, -) - - -_VALUERANGE = _descriptor.Descriptor( - name="ValueRange", - full_name="google.bigtable.v2.ValueRange", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="start_value_closed", - full_name="google.bigtable.v2.ValueRange.start_value_closed", - index=0, - number=1, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="start_value_open", - full_name="google.bigtable.v2.ValueRange.start_value_open", - index=1, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="end_value_closed", - full_name="google.bigtable.v2.ValueRange.end_value_closed", - index=2, - number=3, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="end_value_open", - full_name="google.bigtable.v2.ValueRange.end_value_open", - index=3, - number=4, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - 
create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="start_value", - full_name="google.bigtable.v2.ValueRange.start_value", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - _descriptor.OneofDescriptor( - name="end_value", - full_name="google.bigtable.v2.ValueRange.end_value", - index=1, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=836, - serialized_end=988, -) - - -_ROWFILTER_CHAIN = _descriptor.Descriptor( - name="Chain", - full_name="google.bigtable.v2.RowFilter.Chain", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="filters", - full_name="google.bigtable.v2.RowFilter.Chain.filters", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1807, - serialized_end=1862, -) - -_ROWFILTER_INTERLEAVE = _descriptor.Descriptor( - name="Interleave", - full_name="google.bigtable.v2.RowFilter.Interleave", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="filters", - full_name="google.bigtable.v2.RowFilter.Interleave.filters", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - 
default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1864, - serialized_end=1924, -) - -_ROWFILTER_CONDITION = _descriptor.Descriptor( - name="Condition", - full_name="google.bigtable.v2.RowFilter.Condition", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="predicate_filter", - full_name="google.bigtable.v2.RowFilter.Condition.predicate_filter", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="true_filter", - full_name="google.bigtable.v2.RowFilter.Condition.true_filter", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="false_filter", - full_name="google.bigtable.v2.RowFilter.Condition.false_filter", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - 
], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1927, - serialized_end=2100, -) - -_ROWFILTER = _descriptor.Descriptor( - name="RowFilter", - full_name="google.bigtable.v2.RowFilter", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="chain", - full_name="google.bigtable.v2.RowFilter.chain", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="interleave", - full_name="google.bigtable.v2.RowFilter.interleave", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="condition", - full_name="google.bigtable.v2.RowFilter.condition", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="sink", - full_name="google.bigtable.v2.RowFilter.sink", - index=3, - number=16, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - 
extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="pass_all_filter", - full_name="google.bigtable.v2.RowFilter.pass_all_filter", - index=4, - number=17, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="block_all_filter", - full_name="google.bigtable.v2.RowFilter.block_all_filter", - index=5, - number=18, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="row_key_regex_filter", - full_name="google.bigtable.v2.RowFilter.row_key_regex_filter", - index=6, - number=4, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="row_sample_filter", - full_name="google.bigtable.v2.RowFilter.row_sample_filter", - index=7, - number=14, - type=1, - cpp_type=5, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="family_name_regex_filter", - 
full_name="google.bigtable.v2.RowFilter.family_name_regex_filter", - index=8, - number=5, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="column_qualifier_regex_filter", - full_name="google.bigtable.v2.RowFilter.column_qualifier_regex_filter", - index=9, - number=6, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="column_range_filter", - full_name="google.bigtable.v2.RowFilter.column_range_filter", - index=10, - number=7, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="timestamp_range_filter", - full_name="google.bigtable.v2.RowFilter.timestamp_range_filter", - index=11, - number=8, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value_regex_filter", - full_name="google.bigtable.v2.RowFilter.value_regex_filter", - index=12, - number=9, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - 
enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value_range_filter", - full_name="google.bigtable.v2.RowFilter.value_range_filter", - index=13, - number=15, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cells_per_row_offset_filter", - full_name="google.bigtable.v2.RowFilter.cells_per_row_offset_filter", - index=14, - number=10, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cells_per_row_limit_filter", - full_name="google.bigtable.v2.RowFilter.cells_per_row_limit_filter", - index=15, - number=11, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cells_per_column_limit_filter", - full_name="google.bigtable.v2.RowFilter.cells_per_column_limit_filter", - index=16, - number=12, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - 
_descriptor.FieldDescriptor( - name="strip_value_transformer", - full_name="google.bigtable.v2.RowFilter.strip_value_transformer", - index=17, - number=13, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="apply_label_transformer", - full_name="google.bigtable.v2.RowFilter.apply_label_transformer", - index=18, - number=19, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_ROWFILTER_CHAIN, _ROWFILTER_INTERLEAVE, _ROWFILTER_CONDITION,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="filter", - full_name="google.bigtable.v2.RowFilter.filter", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=991, - serialized_end=2110, -) - - -_MUTATION_SETCELL = _descriptor.Descriptor( - name="SetCell", - full_name="google.bigtable.v2.Mutation.SetCell", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="family_name", - full_name="google.bigtable.v2.Mutation.SetCell.family_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, 
- file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="column_qualifier", - full_name="google.bigtable.v2.Mutation.SetCell.column_qualifier", - index=1, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="timestamp_micros", - full_name="google.bigtable.v2.Mutation.SetCell.timestamp_micros", - index=2, - number=3, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.bigtable.v2.Mutation.SetCell.value", - index=3, - number=4, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2408, - serialized_end=2505, -) - -_MUTATION_DELETEFROMCOLUMN = _descriptor.Descriptor( - name="DeleteFromColumn", - full_name="google.bigtable.v2.Mutation.DeleteFromColumn", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="family_name", - full_name="google.bigtable.v2.Mutation.DeleteFromColumn.family_name", - index=0, - number=1, - type=9, 
- cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="column_qualifier", - full_name="google.bigtable.v2.Mutation.DeleteFromColumn.column_qualifier", - index=1, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="time_range", - full_name="google.bigtable.v2.Mutation.DeleteFromColumn.time_range", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2507, - serialized_end=2628, -) - -_MUTATION_DELETEFROMFAMILY = _descriptor.Descriptor( - name="DeleteFromFamily", - full_name="google.bigtable.v2.Mutation.DeleteFromFamily", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="family_name", - full_name="google.bigtable.v2.Mutation.DeleteFromFamily.family_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - 
extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2630, - serialized_end=2669, -) - -_MUTATION_DELETEFROMROW = _descriptor.Descriptor( - name="DeleteFromRow", - full_name="google.bigtable.v2.Mutation.DeleteFromRow", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2671, - serialized_end=2686, -) - -_MUTATION = _descriptor.Descriptor( - name="Mutation", - full_name="google.bigtable.v2.Mutation", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="set_cell", - full_name="google.bigtable.v2.Mutation.set_cell", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="delete_from_column", - full_name="google.bigtable.v2.Mutation.delete_from_column", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="delete_from_family", - full_name="google.bigtable.v2.Mutation.delete_from_family", - 
index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="delete_from_row", - full_name="google.bigtable.v2.Mutation.delete_from_row", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[ - _MUTATION_SETCELL, - _MUTATION_DELETEFROMCOLUMN, - _MUTATION_DELETEFROMFAMILY, - _MUTATION_DELETEFROMROW, - ], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="mutation", - full_name="google.bigtable.v2.Mutation.mutation", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=2113, - serialized_end=2698, -) - - -_READMODIFYWRITERULE = _descriptor.Descriptor( - name="ReadModifyWriteRule", - full_name="google.bigtable.v2.ReadModifyWriteRule", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="family_name", - full_name="google.bigtable.v2.ReadModifyWriteRule.family_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - 
_descriptor.FieldDescriptor( - name="column_qualifier", - full_name="google.bigtable.v2.ReadModifyWriteRule.column_qualifier", - index=1, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="append_value", - full_name="google.bigtable.v2.ReadModifyWriteRule.append_value", - index=2, - number=3, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="increment_amount", - full_name="google.bigtable.v2.ReadModifyWriteRule.increment_amount", - index=3, - number=4, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="rule", - full_name="google.bigtable.v2.ReadModifyWriteRule.rule", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=2701, - serialized_end=2829, -) - -_ROW.fields_by_name["families"].message_type = _FAMILY -_FAMILY.fields_by_name["columns"].message_type = _COLUMN -_COLUMN.fields_by_name["cells"].message_type = _CELL -_ROWRANGE.oneofs_by_name["start_key"].fields.append( - 
_ROWRANGE.fields_by_name["start_key_closed"] -) -_ROWRANGE.fields_by_name[ - "start_key_closed" -].containing_oneof = _ROWRANGE.oneofs_by_name["start_key"] -_ROWRANGE.oneofs_by_name["start_key"].fields.append( - _ROWRANGE.fields_by_name["start_key_open"] -) -_ROWRANGE.fields_by_name["start_key_open"].containing_oneof = _ROWRANGE.oneofs_by_name[ - "start_key" -] -_ROWRANGE.oneofs_by_name["end_key"].fields.append( - _ROWRANGE.fields_by_name["end_key_open"] -) -_ROWRANGE.fields_by_name["end_key_open"].containing_oneof = _ROWRANGE.oneofs_by_name[ - "end_key" -] -_ROWRANGE.oneofs_by_name["end_key"].fields.append( - _ROWRANGE.fields_by_name["end_key_closed"] -) -_ROWRANGE.fields_by_name["end_key_closed"].containing_oneof = _ROWRANGE.oneofs_by_name[ - "end_key" -] -_ROWSET.fields_by_name["row_ranges"].message_type = _ROWRANGE -_COLUMNRANGE.oneofs_by_name["start_qualifier"].fields.append( - _COLUMNRANGE.fields_by_name["start_qualifier_closed"] -) -_COLUMNRANGE.fields_by_name[ - "start_qualifier_closed" -].containing_oneof = _COLUMNRANGE.oneofs_by_name["start_qualifier"] -_COLUMNRANGE.oneofs_by_name["start_qualifier"].fields.append( - _COLUMNRANGE.fields_by_name["start_qualifier_open"] -) -_COLUMNRANGE.fields_by_name[ - "start_qualifier_open" -].containing_oneof = _COLUMNRANGE.oneofs_by_name["start_qualifier"] -_COLUMNRANGE.oneofs_by_name["end_qualifier"].fields.append( - _COLUMNRANGE.fields_by_name["end_qualifier_closed"] -) -_COLUMNRANGE.fields_by_name[ - "end_qualifier_closed" -].containing_oneof = _COLUMNRANGE.oneofs_by_name["end_qualifier"] -_COLUMNRANGE.oneofs_by_name["end_qualifier"].fields.append( - _COLUMNRANGE.fields_by_name["end_qualifier_open"] -) -_COLUMNRANGE.fields_by_name[ - "end_qualifier_open" -].containing_oneof = _COLUMNRANGE.oneofs_by_name["end_qualifier"] -_VALUERANGE.oneofs_by_name["start_value"].fields.append( - _VALUERANGE.fields_by_name["start_value_closed"] -) -_VALUERANGE.fields_by_name[ - "start_value_closed" -].containing_oneof = 
_VALUERANGE.oneofs_by_name["start_value"] -_VALUERANGE.oneofs_by_name["start_value"].fields.append( - _VALUERANGE.fields_by_name["start_value_open"] -) -_VALUERANGE.fields_by_name[ - "start_value_open" -].containing_oneof = _VALUERANGE.oneofs_by_name["start_value"] -_VALUERANGE.oneofs_by_name["end_value"].fields.append( - _VALUERANGE.fields_by_name["end_value_closed"] -) -_VALUERANGE.fields_by_name[ - "end_value_closed" -].containing_oneof = _VALUERANGE.oneofs_by_name["end_value"] -_VALUERANGE.oneofs_by_name["end_value"].fields.append( - _VALUERANGE.fields_by_name["end_value_open"] -) -_VALUERANGE.fields_by_name[ - "end_value_open" -].containing_oneof = _VALUERANGE.oneofs_by_name["end_value"] -_ROWFILTER_CHAIN.fields_by_name["filters"].message_type = _ROWFILTER -_ROWFILTER_CHAIN.containing_type = _ROWFILTER -_ROWFILTER_INTERLEAVE.fields_by_name["filters"].message_type = _ROWFILTER -_ROWFILTER_INTERLEAVE.containing_type = _ROWFILTER -_ROWFILTER_CONDITION.fields_by_name["predicate_filter"].message_type = _ROWFILTER -_ROWFILTER_CONDITION.fields_by_name["true_filter"].message_type = _ROWFILTER -_ROWFILTER_CONDITION.fields_by_name["false_filter"].message_type = _ROWFILTER -_ROWFILTER_CONDITION.containing_type = _ROWFILTER -_ROWFILTER.fields_by_name["chain"].message_type = _ROWFILTER_CHAIN -_ROWFILTER.fields_by_name["interleave"].message_type = _ROWFILTER_INTERLEAVE -_ROWFILTER.fields_by_name["condition"].message_type = _ROWFILTER_CONDITION -_ROWFILTER.fields_by_name["column_range_filter"].message_type = _COLUMNRANGE -_ROWFILTER.fields_by_name["timestamp_range_filter"].message_type = _TIMESTAMPRANGE -_ROWFILTER.fields_by_name["value_range_filter"].message_type = _VALUERANGE -_ROWFILTER.oneofs_by_name["filter"].fields.append(_ROWFILTER.fields_by_name["chain"]) -_ROWFILTER.fields_by_name["chain"].containing_oneof = _ROWFILTER.oneofs_by_name[ - "filter" -] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["interleave"] -) 
-_ROWFILTER.fields_by_name["interleave"].containing_oneof = _ROWFILTER.oneofs_by_name[ - "filter" -] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["condition"] -) -_ROWFILTER.fields_by_name["condition"].containing_oneof = _ROWFILTER.oneofs_by_name[ - "filter" -] -_ROWFILTER.oneofs_by_name["filter"].fields.append(_ROWFILTER.fields_by_name["sink"]) -_ROWFILTER.fields_by_name["sink"].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["pass_all_filter"] -) -_ROWFILTER.fields_by_name[ - "pass_all_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["block_all_filter"] -) -_ROWFILTER.fields_by_name[ - "block_all_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["row_key_regex_filter"] -) -_ROWFILTER.fields_by_name[ - "row_key_regex_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["row_sample_filter"] -) -_ROWFILTER.fields_by_name[ - "row_sample_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["family_name_regex_filter"] -) -_ROWFILTER.fields_by_name[ - "family_name_regex_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["column_qualifier_regex_filter"] -) -_ROWFILTER.fields_by_name[ - "column_qualifier_regex_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["column_range_filter"] -) -_ROWFILTER.fields_by_name[ - "column_range_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] 
-_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["timestamp_range_filter"] -) -_ROWFILTER.fields_by_name[ - "timestamp_range_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["value_regex_filter"] -) -_ROWFILTER.fields_by_name[ - "value_regex_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["value_range_filter"] -) -_ROWFILTER.fields_by_name[ - "value_range_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["cells_per_row_offset_filter"] -) -_ROWFILTER.fields_by_name[ - "cells_per_row_offset_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["cells_per_row_limit_filter"] -) -_ROWFILTER.fields_by_name[ - "cells_per_row_limit_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["cells_per_column_limit_filter"] -) -_ROWFILTER.fields_by_name[ - "cells_per_column_limit_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["strip_value_transformer"] -) -_ROWFILTER.fields_by_name[ - "strip_value_transformer" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["apply_label_transformer"] -) -_ROWFILTER.fields_by_name[ - "apply_label_transformer" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_MUTATION_SETCELL.containing_type = _MUTATION -_MUTATION_DELETEFROMCOLUMN.fields_by_name["time_range"].message_type = _TIMESTAMPRANGE -_MUTATION_DELETEFROMCOLUMN.containing_type = _MUTATION 
-_MUTATION_DELETEFROMFAMILY.containing_type = _MUTATION -_MUTATION_DELETEFROMROW.containing_type = _MUTATION -_MUTATION.fields_by_name["set_cell"].message_type = _MUTATION_SETCELL -_MUTATION.fields_by_name["delete_from_column"].message_type = _MUTATION_DELETEFROMCOLUMN -_MUTATION.fields_by_name["delete_from_family"].message_type = _MUTATION_DELETEFROMFAMILY -_MUTATION.fields_by_name["delete_from_row"].message_type = _MUTATION_DELETEFROMROW -_MUTATION.oneofs_by_name["mutation"].fields.append(_MUTATION.fields_by_name["set_cell"]) -_MUTATION.fields_by_name["set_cell"].containing_oneof = _MUTATION.oneofs_by_name[ - "mutation" -] -_MUTATION.oneofs_by_name["mutation"].fields.append( - _MUTATION.fields_by_name["delete_from_column"] -) -_MUTATION.fields_by_name[ - "delete_from_column" -].containing_oneof = _MUTATION.oneofs_by_name["mutation"] -_MUTATION.oneofs_by_name["mutation"].fields.append( - _MUTATION.fields_by_name["delete_from_family"] -) -_MUTATION.fields_by_name[ - "delete_from_family" -].containing_oneof = _MUTATION.oneofs_by_name["mutation"] -_MUTATION.oneofs_by_name["mutation"].fields.append( - _MUTATION.fields_by_name["delete_from_row"] -) -_MUTATION.fields_by_name["delete_from_row"].containing_oneof = _MUTATION.oneofs_by_name[ - "mutation" -] -_READMODIFYWRITERULE.oneofs_by_name["rule"].fields.append( - _READMODIFYWRITERULE.fields_by_name["append_value"] -) -_READMODIFYWRITERULE.fields_by_name[ - "append_value" -].containing_oneof = _READMODIFYWRITERULE.oneofs_by_name["rule"] -_READMODIFYWRITERULE.oneofs_by_name["rule"].fields.append( - _READMODIFYWRITERULE.fields_by_name["increment_amount"] -) -_READMODIFYWRITERULE.fields_by_name[ - "increment_amount" -].containing_oneof = _READMODIFYWRITERULE.oneofs_by_name["rule"] -DESCRIPTOR.message_types_by_name["Row"] = _ROW -DESCRIPTOR.message_types_by_name["Family"] = _FAMILY -DESCRIPTOR.message_types_by_name["Column"] = _COLUMN -DESCRIPTOR.message_types_by_name["Cell"] = _CELL 
-DESCRIPTOR.message_types_by_name["RowRange"] = _ROWRANGE -DESCRIPTOR.message_types_by_name["RowSet"] = _ROWSET -DESCRIPTOR.message_types_by_name["ColumnRange"] = _COLUMNRANGE -DESCRIPTOR.message_types_by_name["TimestampRange"] = _TIMESTAMPRANGE -DESCRIPTOR.message_types_by_name["ValueRange"] = _VALUERANGE -DESCRIPTOR.message_types_by_name["RowFilter"] = _ROWFILTER -DESCRIPTOR.message_types_by_name["Mutation"] = _MUTATION -DESCRIPTOR.message_types_by_name["ReadModifyWriteRule"] = _READMODIFYWRITERULE -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -Row = _reflection.GeneratedProtocolMessageType( - "Row", - (_message.Message,), - { - "DESCRIPTOR": _ROW, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """Specifies the complete (requested) contents of a single row of a - table. Rows which exceed 256MiB in size cannot be read in full. - - Attributes: - key: - The unique key which identifies this row within its table. - This is the same key that’s used to identify the row in, for - example, a MutateRowRequest. May contain any non-empty byte - string up to 4KiB in length. - families: - May be empty, but only if the entire row is empty. The mutual - ordering of column families is not specified. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.Row) - }, -) -_sym_db.RegisterMessage(Row) - -Family = _reflection.GeneratedProtocolMessageType( - "Family", - (_message.Message,), - { - "DESCRIPTOR": _FAMILY, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """Specifies (some of) the contents of a single row/column family - intersection of a table. - - Attributes: - name: - The unique key which identifies this family within its row. - This is the same key that’s used to identify the family in, - for example, a RowFilter which sets its - “family_name_regex_filter” field. Must match - ``[-_.a-zA-Z0-9]+``, except that AggregatingRowProcessors may - produce cells in a sentinel family with an empty name. 
Must be - no greater than 64 characters in length. - columns: - Must not be empty. Sorted in order of increasing “qualifier”. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.Family) - }, -) -_sym_db.RegisterMessage(Family) - -Column = _reflection.GeneratedProtocolMessageType( - "Column", - (_message.Message,), - { - "DESCRIPTOR": _COLUMN, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """Specifies (some of) the contents of a single row/column intersection - of a table. - - Attributes: - qualifier: - The unique key which identifies this column within its family. - This is the same key that’s used to identify the column in, - for example, a RowFilter which sets its - ``column_qualifier_regex_filter`` field. May contain any byte - string, including the empty string, up to 16kiB in length. - cells: - Must not be empty. Sorted in order of decreasing - “timestamp_micros”. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.Column) - }, -) -_sym_db.RegisterMessage(Column) - -Cell = _reflection.GeneratedProtocolMessageType( - "Cell", - (_message.Message,), - { - "DESCRIPTOR": _CELL, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """Specifies (some of) the contents of a single row/column/timestamp of a - table. - - Attributes: - timestamp_micros: - The cell’s stored timestamp, which also uniquely identifies it - within its column. Values are always expressed in - microseconds, but individual tables may set a coarser - granularity to further restrict the allowed values. For - example, a table which specifies millisecond granularity will - only allow values of ``timestamp_micros`` which are multiples - of 1000. - value: - The value stored in the cell. May contain any byte string, - including the empty string, up to 100MiB in length. - labels: - Labels applied to the cell by a - [RowFilter][google.bigtable.v2.RowFilter]. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.Cell) - }, -) -_sym_db.RegisterMessage(Cell) - -RowRange = _reflection.GeneratedProtocolMessageType( - "RowRange", - (_message.Message,), - { - "DESCRIPTOR": _ROWRANGE, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """Specifies a contiguous range of rows. - - Attributes: - start_key: - The row key at which to start the range. If neither field is - set, interpreted as the empty string, inclusive. - start_key_closed: - Used when giving an inclusive lower bound for the range. - start_key_open: - Used when giving an exclusive lower bound for the range. - end_key: - The row key at which to end the range. If neither field is - set, interpreted as the infinite row key, exclusive. - end_key_open: - Used when giving an exclusive upper bound for the range. - end_key_closed: - Used when giving an inclusive upper bound for the range. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowRange) - }, -) -_sym_db.RegisterMessage(RowRange) - -RowSet = _reflection.GeneratedProtocolMessageType( - "RowSet", - (_message.Message,), - { - "DESCRIPTOR": _ROWSET, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """Specifies a non-contiguous set of rows. - - Attributes: - row_keys: - Single rows included in the set. - row_ranges: - Contiguous row ranges included in the set. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowSet) - }, -) -_sym_db.RegisterMessage(RowSet) - -ColumnRange = _reflection.GeneratedProtocolMessageType( - "ColumnRange", - (_message.Message,), - { - "DESCRIPTOR": _COLUMNRANGE, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """Specifies a contiguous range of columns within a single column family. - The range spans from : to - :, where both bounds can be either - inclusive or exclusive. - - Attributes: - family_name: - The name of the column family within which this range falls. 
- start_qualifier: - The column qualifier at which to start the range (within - ``column_family``). If neither field is set, interpreted as - the empty string, inclusive. - start_qualifier_closed: - Used when giving an inclusive lower bound for the range. - start_qualifier_open: - Used when giving an exclusive lower bound for the range. - end_qualifier: - The column qualifier at which to end the range (within - ``column_family``). If neither field is set, interpreted as - the infinite string, exclusive. - end_qualifier_closed: - Used when giving an inclusive upper bound for the range. - end_qualifier_open: - Used when giving an exclusive upper bound for the range. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.ColumnRange) - }, -) -_sym_db.RegisterMessage(ColumnRange) - -TimestampRange = _reflection.GeneratedProtocolMessageType( - "TimestampRange", - (_message.Message,), - { - "DESCRIPTOR": _TIMESTAMPRANGE, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """Specified a contiguous range of microsecond timestamps. - - Attributes: - start_timestamp_micros: - Inclusive lower bound. If left empty, interpreted as 0. - end_timestamp_micros: - Exclusive upper bound. If left empty, interpreted as infinity. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.TimestampRange) - }, -) -_sym_db.RegisterMessage(TimestampRange) - -ValueRange = _reflection.GeneratedProtocolMessageType( - "ValueRange", - (_message.Message,), - { - "DESCRIPTOR": _VALUERANGE, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """Specifies a contiguous range of raw byte values. - - Attributes: - start_value: - The value at which to start the range. If neither field is - set, interpreted as the empty string, inclusive. - start_value_closed: - Used when giving an inclusive lower bound for the range. - start_value_open: - Used when giving an exclusive lower bound for the range. 
- end_value: - The value at which to end the range. If neither field is set, - interpreted as the infinite string, exclusive. - end_value_closed: - Used when giving an inclusive upper bound for the range. - end_value_open: - Used when giving an exclusive upper bound for the range. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.ValueRange) - }, -) -_sym_db.RegisterMessage(ValueRange) - -RowFilter = _reflection.GeneratedProtocolMessageType( - "RowFilter", - (_message.Message,), - { - "Chain": _reflection.GeneratedProtocolMessageType( - "Chain", - (_message.Message,), - { - "DESCRIPTOR": _ROWFILTER_CHAIN, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """A RowFilter which sends rows through several RowFilters in sequence. - - Attributes: - filters: - The elements of “filters” are chained together to process the - input row: in row -> f(0) -> intermediate row -> f(1) -> … -> - f(N) -> out row The full chain is executed atomically. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter.Chain) - }, - ), - "Interleave": _reflection.GeneratedProtocolMessageType( - "Interleave", - (_message.Message,), - { - "DESCRIPTOR": _ROWFILTER_INTERLEAVE, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """A RowFilter which sends each row to each of several component - RowFilters and interleaves the results. - - Attributes: - filters: - The elements of “filters” all process a copy of the input row, - and the results are pooled, sorted, and combined into a single - output row. If multiple cells are produced with the same - column and timestamp, they will all appear in the output row - in an unspecified mutual order. 
Consider the following - example, with three filters: :: - input row | - ----------------------------------------------------- - | | | - f(0) f(1) f(2) - | | | 1: - foo,bar,10,x foo,bar,10,z far,bar,7,a - 2: foo,blah,11,z far,blah,5,x - far,blah,5,x | | - | - ----------------------------------------------------- - | 1: foo,bar,10,z // could have - switched with #2 2: foo,bar,10,x // - could have switched with #1 3: - foo,blah,11,z 4: far,bar,7,a 5: - far,blah,5,x // identical to #6 6: - far,blah,5,x // identical to #5 All interleaved filters are - executed atomically. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter.Interleave) - }, - ), - "Condition": _reflection.GeneratedProtocolMessageType( - "Condition", - (_message.Message,), - { - "DESCRIPTOR": _ROWFILTER_CONDITION, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """A RowFilter which evaluates one of two possible RowFilters, depending - on whether or not a predicate RowFilter outputs any cells from the - input row. IMPORTANT NOTE: The predicate filter does not execute - atomically with the true and false filters, which may lead to - inconsistent or unexpected results. Additionally, Condition filters - have poor performance, especially when filters are set for the false - condition. - - Attributes: - predicate_filter: - If ``predicate_filter`` outputs any cells, then - ``true_filter`` will be evaluated on the input row. Otherwise, - ``false_filter`` will be evaluated. - true_filter: - The filter to apply to the input row if ``predicate_filter`` - returns any results. If not provided, no results will be - returned in the true case. - false_filter: - The filter to apply to the input row if ``predicate_filter`` - does not return any results. If not provided, no results will - be returned in the false case. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter.Condition) - }, - ), - "DESCRIPTOR": _ROWFILTER, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """Takes a row as input and produces an alternate view of the row based - on specified rules. For example, a RowFilter might trim down a row to - include just the cells from columns matching a given regular - expression, or might return all the cells of a row but not their - values. More complicated filters can be composed out of these - components to express requests such as, “within every column of a - particular family, give just the two most recent cells which are older - than timestamp X.” There are two broad categories of RowFilters (true - filters and transformers), as well as two ways to compose simple - filters into more complex ones (chains and interleaves). They work as - follows: - True filters alter the input row by excluding some of its - cells wholesale from the output row. An example of a true filter is - the ``value_regex_filter``, which excludes cells whose values don’t - match the specified pattern. All regex true filters use RE2 syntax - (https://github.com/google/re2/wiki/Syntax) in raw byte mode - (RE2::Latin1), and are evaluated as full matches. An important point - to keep in mind is that ``RE2(.)`` is equivalent by default to - ``RE2([^\n])``, meaning that it does not match newlines. When - attempting to match an arbitrary byte, you should therefore use the - escape sequence ``\C``, which may need to be further escaped as - ``\\C`` in your client language. - Transformers alter the input row - by changing the values of some of its cells in the output, without - excluding them completely. Currently, the only supported - transformer is the ``strip_value_transformer``, which replaces - every cell’s value with the empty string. - Chains and - interleaves are described in more detail in the RowFilter.Chain and - RowFilter.Interleave documentation. 
The total serialized size of a - RowFilter message must not exceed 4096 bytes, and RowFilters may not - be nested within each other (in Chains or Interleaves) to a depth of - more than 20. - - Attributes: - filter: - Which of the possible RowFilter types to apply. If none are - set, this RowFilter returns all cells in the input row. - chain: - Applies several RowFilters to the data in sequence, - progressively narrowing the results. - interleave: - Applies several RowFilters to the data in parallel and - combines the results. - condition: - Applies one of two possible RowFilters to the data based on - the output of a predicate RowFilter. - sink: - ADVANCED USE ONLY. Hook for introspection into the RowFilter. - Outputs all cells directly to the output of the read rather - than to any parent filter. Consider the following example: :: - Chain( FamilyRegex("A"), Interleave( All(), - Chain(Label("foo"), Sink()) ), QualifierRegex("B") - ) A,A,1,w - A,B,2,x B,B,4,z - | FamilyRegex("A") - | A,A,1,w - A,B,2,x | - +------------+-------------+ | - | All() Label(foo) - | | A,A,1,w - A,A,1,w,labels:[foo] A,B,2,x - A,B,2,x,labels:[foo] | | - | Sink() --------------+ | - | | +------------+ x------+ - A,A,1,w,labels:[foo] | - A,B,2,x,labels:[foo] A,A,1,w - | A,B,2,x | - | | - QualifierRegex("B") | - | | - A,B,2,x | - | | - +--------------------------------+ | - A,A,1,w,labels:[foo] - A,B,2,x,labels:[foo] // could be switched - A,B,2,x // could be switched Despite being - excluded by the qualifier filter, a copy of every cell that - reaches the sink is present in the final result. As with an - [Interleave][google.bigtable.v2.RowFilter.Interleave], - duplicate cells are possible, and appear in an unspecified - mutual order. In this case we have a duplicate with column - “A:B” and timestamp 2, because one copy passed through the all - filter while the other was passed through the label and sink. - Note that one copy has label “foo”, while the other does not. 
- Cannot be used within the ``predicate_filter``, - ``true_filter``, or ``false_filter`` of a - [Condition][google.bigtable.v2.RowFilter.Condition]. - pass_all_filter: - Matches all cells, regardless of input. Functionally - equivalent to leaving ``filter`` unset, but included for - completeness. - block_all_filter: - Does not match any cells, regardless of input. Useful for - temporarily disabling just part of a filter. - row_key_regex_filter: - Matches only cells from rows whose keys satisfy the given RE2 - regex. In other words, passes through the entire row when the - key matches, and otherwise produces an empty row. Note that, - since row keys can contain arbitrary bytes, the ``\C`` escape - sequence must be used if a true wildcard is desired. The ``.`` - character will not match the new line character ``\n``, which - may be present in a binary key. - row_sample_filter: - Matches all cells from a row with probability p, and matches - no cells from the row with probability 1-p. - family_name_regex_filter: - Matches only cells from columns whose families satisfy the - given RE2 regex. For technical reasons, the regex must not - contain the ``:`` character, even if it is not being used as a - literal. Note that, since column families cannot contain the - new line character ``\n``, it is sufficient to use ``.`` as a - full wildcard when matching column family names. - column_qualifier_regex_filter: - Matches only cells from columns whose qualifiers satisfy the - given RE2 regex. Note that, since column qualifiers can - contain arbitrary bytes, the ``\C`` escape sequence must be - used if a true wildcard is desired. The ``.`` character will - not match the new line character ``\n``, which may be present - in a binary qualifier. - column_range_filter: - Matches only cells from columns within the given range. - timestamp_range_filter: - Matches only cells with timestamps within the given range. 
- value_regex_filter: - Matches only cells with values that satisfy the given regular - expression. Note that, since cell values can contain arbitrary - bytes, the ``\C`` escape sequence must be used if a true - wildcard is desired. The ``.`` character will not match the - new line character ``\n``, which may be present in a binary - value. - value_range_filter: - Matches only cells with values that fall within the given - range. - cells_per_row_offset_filter: - Skips the first N cells of each row, matching all subsequent - cells. If duplicate cells are present, as is possible when - using an Interleave, each copy of the cell is counted - separately. - cells_per_row_limit_filter: - Matches only the first N cells of each row. If duplicate cells - are present, as is possible when using an Interleave, each - copy of the cell is counted separately. - cells_per_column_limit_filter: - Matches only the most recent N cells within each column. For - example, if N=2, this filter would match column ``foo:bar`` at - timestamps 10 and 9, skip all earlier cells in ``foo:bar``, - and then begin matching again in column ``foo:bar2``. If - duplicate cells are present, as is possible when using an - Interleave, each copy of the cell is counted separately. - strip_value_transformer: - Replaces each cell’s value with the empty string. - apply_label_transformer: - Applies the given label to all cells in the output row. This - allows the client to determine which results were produced - from which part of the filter. Values must be at most 15 - characters in length, and match the RE2 pattern - ``[a-z0-9\\-]+`` Due to a technical limitation, it is not - currently possible to apply multiple labels to a cell. As a - result, a Chain may have no more than one sub-filter which - contains a ``apply_label_transformer``. It is okay for an - Interleave to contain multiple ``apply_label_transformers``, - as they will be applied to separate copies of the input. This - may be relaxed in the future. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter) - }, -) -_sym_db.RegisterMessage(RowFilter) -_sym_db.RegisterMessage(RowFilter.Chain) -_sym_db.RegisterMessage(RowFilter.Interleave) -_sym_db.RegisterMessage(RowFilter.Condition) - -Mutation = _reflection.GeneratedProtocolMessageType( - "Mutation", - (_message.Message,), - { - "SetCell": _reflection.GeneratedProtocolMessageType( - "SetCell", - (_message.Message,), - { - "DESCRIPTOR": _MUTATION_SETCELL, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """A Mutation which sets the value of the specified cell. - - Attributes: - family_name: - The name of the family into which new data should be written. - Must match ``[-_.a-zA-Z0-9]+`` - column_qualifier: - The qualifier of the column into which new data should be - written. Can be any byte string, including the empty string. - timestamp_micros: - The timestamp of the cell into which new data should be - written. Use -1 for current Bigtable server time. Otherwise, - the client should set this value itself, noting that the - default value is a timestamp of zero if the field is left - unspecified. Values must match the granularity of the table - (e.g. micros, millis). - value: - The value to be written into the specified cell. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.SetCell) - }, - ), - "DeleteFromColumn": _reflection.GeneratedProtocolMessageType( - "DeleteFromColumn", - (_message.Message,), - { - "DESCRIPTOR": _MUTATION_DELETEFROMCOLUMN, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """A Mutation which deletes cells from the specified column, optionally - restricting the deletions to a given timestamp range. - - Attributes: - family_name: - The name of the family from which cells should be deleted. - Must match ``[-_.a-zA-Z0-9]+`` - column_qualifier: - The qualifier of the column from which cells should be - deleted. 
Can be any byte string, including the empty string. - time_range: - The range of timestamps within which cells should be deleted. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.DeleteFromColumn) - }, - ), - "DeleteFromFamily": _reflection.GeneratedProtocolMessageType( - "DeleteFromFamily", - (_message.Message,), - { - "DESCRIPTOR": _MUTATION_DELETEFROMFAMILY, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """A Mutation which deletes all cells from the specified column family. - - Attributes: - family_name: - The name of the family from which cells should be deleted. - Must match ``[-_.a-zA-Z0-9]+`` - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.DeleteFromFamily) - }, - ), - "DeleteFromRow": _reflection.GeneratedProtocolMessageType( - "DeleteFromRow", - (_message.Message,), - { - "DESCRIPTOR": _MUTATION_DELETEFROMROW, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """A Mutation which deletes all cells from the containing row.""", - # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.DeleteFromRow) - }, - ), - "DESCRIPTOR": _MUTATION, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """Specifies a particular change to be made to the contents of a row. - - Attributes: - mutation: - Which of the possible Mutation types to apply. - set_cell: - Set a cell’s value. - delete_from_column: - Deletes cells from a column. - delete_from_family: - Deletes cells from a column family. - delete_from_row: - Deletes cells from the entire row. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation) - }, -) -_sym_db.RegisterMessage(Mutation) -_sym_db.RegisterMessage(Mutation.SetCell) -_sym_db.RegisterMessage(Mutation.DeleteFromColumn) -_sym_db.RegisterMessage(Mutation.DeleteFromFamily) -_sym_db.RegisterMessage(Mutation.DeleteFromRow) - -ReadModifyWriteRule = _reflection.GeneratedProtocolMessageType( - "ReadModifyWriteRule", - (_message.Message,), - { - "DESCRIPTOR": _READMODIFYWRITERULE, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """Specifies an atomic read/modify/write operation on the latest value of - the specified column. - - Attributes: - family_name: - The name of the family to which the read/modify/write should - be applied. Must match ``[-_.a-zA-Z0-9]+`` - column_qualifier: - The qualifier of the column to which the read/modify/write - should be applied. Can be any byte string, including the empty - string. - rule: - The rule used to determine the column’s new latest value from - its current latest value. - append_value: - Rule specifying that ``append_value`` be appended to the - existing value. If the targeted cell is unset, it will be - treated as containing the empty string. - increment_amount: - Rule specifying that ``increment_amount`` be added to the - existing value. If the targeted cell is unset, it will be - treated as containing a zero. Otherwise, the targeted cell - must contain an 8-byte value (interpreted as a 64-bit big- - endian signed integer), or the entire request will fail. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadModifyWriteRule) - }, -) -_sym_db.RegisterMessage(ReadModifyWriteRule) - - -DESCRIPTOR._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/bigtable_v2/proto/data_pb2_grpc.py b/google/cloud/bigtable_v2/proto/data_pb2_grpc.py deleted file mode 100644 index 8a9393943..000000000 --- a/google/cloud/bigtable_v2/proto/data_pb2_grpc.py +++ /dev/null @@ -1,3 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc diff --git a/google/cloud/bigtable_v2/services/bigtable/client.py b/google/cloud/bigtable_v2/services/bigtable/client.py index 8ae811054..a9f3dfd74 100644 --- a/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/google/cloud/bigtable_v2/services/bigtable/client.py @@ -621,12 +621,11 @@ def mutate_row( request.table_name = table_name if row_key is not None: request.row_key = row_key + if mutations is not None: + request.mutations = mutations if app_profile_id is not None: request.app_profile_id = app_profile_id - if mutations: - request.mutations.extend(mutations) - # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.mutate_row] @@ -730,12 +729,11 @@ def mutate_rows( if table_name is not None: request.table_name = table_name + if entries is not None: + request.entries = entries if app_profile_id is not None: request.app_profile_id = app_profile_id - if entries: - request.entries.extend(entries) - # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. 
rpc = self._transport._wrapped_methods[self._transport.mutate_rows] @@ -881,14 +879,13 @@ def check_and_mutate_row( request.row_key = row_key if predicate_filter is not None: request.predicate_filter = predicate_filter + if true_mutations is not None: + request.true_mutations = true_mutations + if false_mutations is not None: + request.false_mutations = false_mutations if app_profile_id is not None: request.app_profile_id = app_profile_id - if true_mutations: - request.true_mutations.extend(true_mutations) - if false_mutations: - request.false_mutations.extend(false_mutations) - # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.check_and_mutate_row] @@ -1005,12 +1002,11 @@ def read_modify_write_row( request.table_name = table_name if row_key is not None: request.row_key = row_key + if rules is not None: + request.rules = rules if app_profile_id is not None: request.app_profile_id = app_profile_id - if rules: - request.rules.extend(rules) - # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.read_modify_write_row] diff --git a/google/cloud/bigtable_v2/types.py b/google/cloud/bigtable_v2/types.py deleted file mode 100644 index 607e1b09c..000000000 --- a/google/cloud/bigtable_v2/types.py +++ /dev/null @@ -1,54 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - - -from __future__ import absolute_import -import sys - -from google.api_core.protobuf_helpers import get_messages - -from google.cloud.bigtable_v2.proto import bigtable_pb2 -from google.cloud.bigtable_v2.proto import data_pb2 -from google.protobuf import any_pb2 -from google.protobuf import wrappers_pb2 -from google.rpc import status_pb2 - - -_shared_modules = [ - any_pb2, - wrappers_pb2, - status_pb2, -] - -_local_modules = [ - bigtable_pb2, - data_pb2, -] - -names = [] - -for module in _shared_modules: # pragma: NO COVER - for name, message in get_messages(module).items(): - setattr(sys.modules[__name__], name, message) - names.append(name) -for module in _local_modules: - for name, message in get_messages(module).items(): - message.__module__ = "google.cloud.bigtable_v2.types" - setattr(sys.modules[__name__], name, message) - names.append(name) - - -__all__ = tuple(sorted(names)) diff --git a/synth.metadata b/synth.metadata index 1b0b9dbfe..0acc26a69 100644 --- a/synth.metadata +++ b/synth.metadata @@ -11,8 +11,8 @@ "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "cbbd3170bcf217e36ae72f4ac522449bf861346f", - "internalRef": "346894665" + "sha": "1e0c7413684ca6f6322620ecfc0d3e0352933dc1", + "internalRef": "356992836" } }, { @@ -106,46 +106,50 @@ "docs/conf.py", "docs/multiprocessing.rst", "google/cloud/bigtable_admin_v2/__init__.py", - "google/cloud/bigtable_admin_v2/gapic/__init__.py", - "google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py", - "google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py", - "google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py", - "google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py", - "google/cloud/bigtable_admin_v2/gapic/enums.py", - 
"google/cloud/bigtable_admin_v2/gapic/transports/__init__.py", - "google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py", - "google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py", - "google/cloud/bigtable_admin_v2/proto/__init__.py", "google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto", - "google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py", - "google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py", "google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto", - "google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py", - "google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py", "google/cloud/bigtable_admin_v2/proto/common.proto", - "google/cloud/bigtable_admin_v2/proto/common_pb2.py", - "google/cloud/bigtable_admin_v2/proto/common_pb2_grpc.py", "google/cloud/bigtable_admin_v2/proto/instance.proto", - "google/cloud/bigtable_admin_v2/proto/instance_pb2.py", - "google/cloud/bigtable_admin_v2/proto/instance_pb2_grpc.py", "google/cloud/bigtable_admin_v2/proto/table.proto", - "google/cloud/bigtable_admin_v2/proto/table_pb2.py", - "google/cloud/bigtable_admin_v2/proto/table_pb2_grpc.py", - "google/cloud/bigtable_admin_v2/types.py", + "google/cloud/bigtable_admin_v2/py.typed", + "google/cloud/bigtable_admin_v2/services/__init__.py", + "google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py", + "google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py", + "google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py", + "google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py", + "google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py", + "google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py", + "google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py", + 
"google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py", + "google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py", + "google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py", + "google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py", + "google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py", + "google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py", + "google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py", + "google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py", + "google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py", + "google/cloud/bigtable_admin_v2/types/__init__.py", + "google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py", + "google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py", + "google/cloud/bigtable_admin_v2/types/common.py", + "google/cloud/bigtable_admin_v2/types/instance.py", + "google/cloud/bigtable_admin_v2/types/table.py", "google/cloud/bigtable_v2/__init__.py", - "google/cloud/bigtable_v2/gapic/__init__.py", - "google/cloud/bigtable_v2/gapic/bigtable_client.py", - "google/cloud/bigtable_v2/gapic/bigtable_client_config.py", - "google/cloud/bigtable_v2/gapic/transports/__init__.py", - "google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py", - "google/cloud/bigtable_v2/proto/__init__.py", "google/cloud/bigtable_v2/proto/bigtable.proto", - "google/cloud/bigtable_v2/proto/bigtable_pb2.py", - "google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py", "google/cloud/bigtable_v2/proto/data.proto", - "google/cloud/bigtable_v2/proto/data_pb2.py", - "google/cloud/bigtable_v2/proto/data_pb2_grpc.py", - "google/cloud/bigtable_v2/types.py", + "google/cloud/bigtable_v2/py.typed", + "google/cloud/bigtable_v2/services/__init__.py", + "google/cloud/bigtable_v2/services/bigtable/__init__.py", + 
"google/cloud/bigtable_v2/services/bigtable/async_client.py", + "google/cloud/bigtable_v2/services/bigtable/client.py", + "google/cloud/bigtable_v2/services/bigtable/transports/__init__.py", + "google/cloud/bigtable_v2/services/bigtable/transports/base.py", + "google/cloud/bigtable_v2/services/bigtable/transports/grpc.py", + "google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py", + "google/cloud/bigtable_v2/types/__init__.py", + "google/cloud/bigtable_v2/types/bigtable.py", + "google/cloud/bigtable_v2/types/data.py", "noxfile.py", "renovate.json", "samples/AUTHORING_GUIDE.md", @@ -160,6 +164,8 @@ "samples/snippets/README.md", "samples/tableadmin/README.md", "scripts/decrypt-secrets.sh", + "scripts/fixup_bigtable_admin_v2_keywords.py", + "scripts/fixup_bigtable_v2_keywords.py", "scripts/readme-gen/readme_gen.py", "scripts/readme-gen/templates/README.tmpl.rst", "scripts/readme-gen/templates/auth.tmpl.rst", @@ -168,8 +174,10 @@ "scripts/readme-gen/templates/install_portaudio.tmpl.rst", "setup.cfg", "testing/.gitignore", - "tests/unit/gapic/v2/test_bigtable_client_v2.py", - "tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py", - "tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py" + "tests/unit/gapic/bigtable_admin_v2/__init__.py", + "tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py", + "tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py", + "tests/unit/gapic/bigtable_v2/__init__.py", + "tests/unit/gapic/bigtable_v2/test_bigtable.py" ] } \ No newline at end of file diff --git a/tests/unit/gapic/v2/test_bigtable_client_v2.py b/tests/unit/gapic/v2/test_bigtable_client_v2.py deleted file mode 100644 index 84abfecef..000000000 --- a/tests/unit/gapic/v2/test_bigtable_client_v2.py +++ /dev/null @@ -1,316 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Unit tests.""" - -import mock -import pytest - -from google.cloud import bigtable_v2 -from google.cloud.bigtable_v2.proto import bigtable_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - def unary_stream(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestBigtableClient(object): - def test_read_rows(self): - # Setup Expected Response - last_scanned_row_key = b"-126" - expected_response = {"last_scanned_row_key": last_scanned_row_key} - expected_response = bigtable_pb2.ReadRowsResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[iter([expected_response])]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") 
- with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup Request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - response = client.read_rows(table_name) - resources = list(response) - assert len(resources) == 1 - assert expected_response == resources[0] - - assert len(channel.requests) == 1 - expected_request = bigtable_pb2.ReadRowsRequest(table_name=table_name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_read_rows_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - with pytest.raises(CustomException): - client.read_rows(table_name) - - def test_sample_row_keys(self): - # Setup Expected Response - row_key = b"122" - offset_bytes = 889884095 - expected_response = {"row_key": row_key, "offset_bytes": offset_bytes} - expected_response = bigtable_pb2.SampleRowKeysResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[iter([expected_response])]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup Request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - response = client.sample_row_keys(table_name) - resources = list(response) - assert len(resources) == 1 - assert expected_response == resources[0] - - assert len(channel.requests) == 1 - expected_request = bigtable_pb2.SampleRowKeysRequest(table_name=table_name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def 
test_sample_row_keys_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - with pytest.raises(CustomException): - client.sample_row_keys(table_name) - - def test_mutate_row(self): - # Setup Expected Response - expected_response = {} - expected_response = bigtable_pb2.MutateRowResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup Request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - row_key = b"122" - mutations = [] - - response = client.mutate_row(table_name, row_key, mutations) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_pb2.MutateRowRequest( - table_name=table_name, row_key=row_key, mutations=mutations - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_mutate_row_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - row_key = b"122" - mutations = [] - - with pytest.raises(CustomException): - client.mutate_row(table_name, row_key, mutations) - - def test_mutate_rows(self): - # Setup Expected Response - expected_response = {} - expected_response = 
bigtable_pb2.MutateRowsResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[iter([expected_response])]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup Request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - entries = [] - - response = client.mutate_rows(table_name, entries) - resources = list(response) - assert len(resources) == 1 - assert expected_response == resources[0] - - assert len(channel.requests) == 1 - expected_request = bigtable_pb2.MutateRowsRequest( - table_name=table_name, entries=entries - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_mutate_rows_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - entries = [] - - with pytest.raises(CustomException): - client.mutate_rows(table_name, entries) - - def test_check_and_mutate_row(self): - # Setup Expected Response - predicate_matched = True - expected_response = {"predicate_matched": predicate_matched} - expected_response = bigtable_pb2.CheckAndMutateRowResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup Request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - row_key = b"122" - - response = client.check_and_mutate_row(table_name, row_key) - assert 
expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_pb2.CheckAndMutateRowRequest( - table_name=table_name, row_key=row_key - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_check_and_mutate_row_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - row_key = b"122" - - with pytest.raises(CustomException): - client.check_and_mutate_row(table_name, row_key) - - def test_read_modify_write_row(self): - # Setup Expected Response - expected_response = {} - expected_response = bigtable_pb2.ReadModifyWriteRowResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup Request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - row_key = b"122" - rules = [] - - response = client.read_modify_write_row(table_name, row_key, rules) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_pb2.ReadModifyWriteRowRequest( - table_name=table_name, row_key=row_key, rules=rules - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_read_modify_write_row_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = 
bigtable_v2.BigtableClient() - - # Setup request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - row_key = b"122" - rules = [] - - with pytest.raises(CustomException): - client.read_modify_write_row(table_name, row_key, rules) diff --git a/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py b/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py deleted file mode 100644 index 626113e52..000000000 --- a/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py +++ /dev/null @@ -1,926 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Unit tests.""" - -import mock -import pytest - -from google.rpc import status_pb2 - -from google.cloud import bigtable_admin_v2 -from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2 -from google.cloud.bigtable_admin_v2.proto import instance_pb2 -from google.iam.v1 import iam_policy_pb2 -from google.iam.v1 import policy_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestBigtableInstanceAdminClient(object): - def test_create_instance(self): - # Setup Expected Response - name = "name3373707" - display_name = "displayName1615086568" - expected_response = {"name": name, "display_name": display_name} - expected_response = instance_pb2.Instance(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_create_instance", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - 
create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.project_path("[PROJECT]") - instance_id = "instanceId-2101995259" - instance = {} - clusters = {} - - response = client.create_instance(parent, instance_id, instance, clusters) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.CreateInstanceRequest( - parent=parent, instance_id=instance_id, instance=instance, clusters=clusters - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_instance_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_create_instance_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.project_path("[PROJECT]") - instance_id = "instanceId-2101995259" - instance = {} - clusters = {} - - response = client.create_instance(parent, instance_id, instance, clusters) - exception = response.exception() - assert exception.errors[0] == error - - def test_partial_update_instance(self): - # Setup Expected Response - name = "name3373707" - display_name = "displayName1615086568" - expected_response = {"name": name, "display_name": display_name} - expected_response = instance_pb2.Instance(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_partial_update_instance", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = 
mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - instance = {} - update_mask = {} - - response = client.partial_update_instance(instance, update_mask) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.PartialUpdateInstanceRequest( - instance=instance, update_mask=update_mask - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_partial_update_instance_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_partial_update_instance_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - instance = {} - update_mask = {} - - response = client.partial_update_instance(instance, update_mask) - exception = response.exception() - assert exception.errors[0] == error - - def test_create_cluster(self): - # Setup Expected Response - name = "name3373707" - location = "location1901043637" - serve_nodes = 1288838783 - expected_response = { - "name": name, - "location": location, - "serve_nodes": serve_nodes, - } - expected_response = instance_pb2.Cluster(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_create_cluster", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - 
create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - cluster_id = "clusterId240280960" - cluster = {} - - response = client.create_cluster(parent, cluster_id, cluster) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.CreateClusterRequest( - parent=parent, cluster_id=cluster_id, cluster=cluster - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_cluster_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_create_cluster_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - cluster_id = "clusterId240280960" - cluster = {} - - response = client.create_cluster(parent, cluster_id, cluster) - exception = response.exception() - assert exception.errors[0] == error - - def test_update_cluster(self): - # Setup Expected Response - name = "name3373707" - location = "location1901043637" - serve_nodes_2 = 1623486220 - expected_response = { - "name": name, - "location": location, - "serve_nodes": serve_nodes_2, - } - expected_response = instance_pb2.Cluster(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_update_cluster", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch 
as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - serve_nodes = 1288838783 - - response = client.update_cluster(serve_nodes) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = instance_pb2.Cluster(serve_nodes=serve_nodes) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_cluster_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_update_cluster_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - serve_nodes = 1288838783 - - response = client.update_cluster(serve_nodes) - exception = response.exception() - assert exception.errors[0] == error - - def test_update_app_profile(self): - # Setup Expected Response - name = "name3373707" - etag = "etag3123477" - description = "description-1724546052" - expected_response = {"name": name, "etag": etag, "description": description} - expected_response = instance_pb2.AppProfile(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_update_app_profile", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - app_profile = {} - update_mask = {} - - response = client.update_app_profile(app_profile, update_mask) - 
result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.UpdateAppProfileRequest( - app_profile=app_profile, update_mask=update_mask - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_app_profile_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_update_app_profile_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - app_profile = {} - update_mask = {} - - response = client.update_app_profile(app_profile, update_mask) - exception = response.exception() - assert exception.errors[0] == error - - def test_get_instance(self): - # Setup Expected Response - name_2 = "name2-1052831874" - display_name = "displayName1615086568" - expected_response = {"name": name_2, "display_name": display_name} - expected_response = instance_pb2.Instance(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - name = client.instance_path("[PROJECT]", "[INSTANCE]") - - response = client.get_instance(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.GetInstanceRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_instance_exception(self): - # Mock the API 
response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - name = client.instance_path("[PROJECT]", "[INSTANCE]") - - with pytest.raises(CustomException): - client.get_instance(name) - - def test_list_instances(self): - # Setup Expected Response - next_page_token = "nextPageToken-1530815211" - expected_response = {"next_page_token": next_page_token} - expected_response = bigtable_instance_admin_pb2.ListInstancesResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.project_path("[PROJECT]") - - response = client.list_instances(parent) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.ListInstancesRequest( - parent=parent - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_instances_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - parent = client.project_path("[PROJECT]") - - with pytest.raises(CustomException): - client.list_instances(parent) - - def test_update_instance(self): - # Setup Expected Response - name = "name3373707" - display_name_2 = "displayName21615000987" - expected_response = {"name": name, "display_name": display_name_2} - 
expected_response = instance_pb2.Instance(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - display_name = "displayName1615086568" - - response = client.update_instance(display_name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = instance_pb2.Instance(display_name=display_name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_instance_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - display_name = "displayName1615086568" - - with pytest.raises(CustomException): - client.update_instance(display_name) - - def test_delete_instance(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - name = client.instance_path("[PROJECT]", "[INSTANCE]") - - client.delete_instance(name) - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.DeleteInstanceRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_instance_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = 
channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - name = client.instance_path("[PROJECT]", "[INSTANCE]") - - with pytest.raises(CustomException): - client.delete_instance(name) - - def test_get_cluster(self): - # Setup Expected Response - name_2 = "name2-1052831874" - location = "location1901043637" - serve_nodes = 1288838783 - expected_response = { - "name": name_2, - "location": location, - "serve_nodes": serve_nodes, - } - expected_response = instance_pb2.Cluster(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - - response = client.get_cluster(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.GetClusterRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_cluster_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - - with pytest.raises(CustomException): - client.get_cluster(name) - - def test_list_clusters(self): - # Setup Expected Response - next_page_token = "nextPageToken-1530815211" - expected_response = {"next_page_token": next_page_token} - expected_response = bigtable_instance_admin_pb2.ListClustersResponse( - **expected_response - ) - - # Mock the API response - channel = 
ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - - response = client.list_clusters(parent) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.ListClustersRequest( - parent=parent - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_clusters_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - - with pytest.raises(CustomException): - client.list_clusters(parent) - - def test_delete_cluster(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - - client.delete_cluster(name) - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.DeleteClusterRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_cluster_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = 
bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - - with pytest.raises(CustomException): - client.delete_cluster(name) - - def test_create_app_profile(self): - # Setup Expected Response - name = "name3373707" - etag = "etag3123477" - description = "description-1724546052" - expected_response = {"name": name, "etag": etag, "description": description} - expected_response = instance_pb2.AppProfile(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - app_profile_id = "appProfileId1262094415" - app_profile = {} - - response = client.create_app_profile(parent, app_profile_id, app_profile) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.CreateAppProfileRequest( - parent=parent, app_profile_id=app_profile_id, app_profile=app_profile - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_app_profile_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - app_profile_id = "appProfileId1262094415" - app_profile = {} - - with pytest.raises(CustomException): - client.create_app_profile(parent, app_profile_id, app_profile) - - def test_get_app_profile(self): - # Setup Expected Response - name_2 = "name2-1052831874" - 
etag = "etag3123477" - description = "description-1724546052" - expected_response = {"name": name_2, "etag": etag, "description": description} - expected_response = instance_pb2.AppProfile(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - name = client.app_profile_path("[PROJECT]", "[INSTANCE]", "[APP_PROFILE]") - - response = client.get_app_profile(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.GetAppProfileRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_app_profile_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - name = client.app_profile_path("[PROJECT]", "[INSTANCE]", "[APP_PROFILE]") - - with pytest.raises(CustomException): - client.get_app_profile(name) - - def test_list_app_profiles(self): - # Setup Expected Response - next_page_token = "" - app_profiles_element = {} - app_profiles = [app_profiles_element] - expected_response = { - "next_page_token": next_page_token, - "app_profiles": app_profiles, - } - expected_response = bigtable_instance_admin_pb2.ListAppProfilesResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = 
bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - - paged_list_response = client.list_app_profiles(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.app_profiles[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.ListAppProfilesRequest( - parent=parent - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_app_profiles_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - - paged_list_response = client.list_app_profiles(parent) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_delete_app_profile(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - name = client.app_profile_path("[PROJECT]", "[INSTANCE]", "[APP_PROFILE]") - ignore_warnings = True - - client.delete_app_profile(name, ignore_warnings) - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.DeleteAppProfileRequest( - name=name, ignore_warnings=ignore_warnings - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_app_profile_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - 
client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - name = client.app_profile_path("[PROJECT]", "[INSTANCE]", "[APP_PROFILE]") - ignore_warnings = True - - with pytest.raises(CustomException): - client.delete_app_profile(name, ignore_warnings) - - def test_get_iam_policy(self): - # Setup Expected Response - version = 351608024 - etag = b"21" - expected_response = {"version": version, "etag": etag} - expected_response = policy_pb2.Policy(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - resource = "resource-341064690" - - response = client.get_iam_policy(resource) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = iam_policy_pb2.GetIamPolicyRequest(resource=resource) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_iam_policy_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - resource = "resource-341064690" - - with pytest.raises(CustomException): - client.get_iam_policy(resource) - - def test_set_iam_policy(self): - # Setup Expected Response - version = 351608024 - etag = b"21" - expected_response = {"version": version, "etag": etag} - expected_response = policy_pb2.Policy(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = 
channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - resource = "resource-341064690" - policy = {} - - response = client.set_iam_policy(resource, policy) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = iam_policy_pb2.SetIamPolicyRequest( - resource=resource, policy=policy - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_set_iam_policy_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - resource = "resource-341064690" - policy = {} - - with pytest.raises(CustomException): - client.set_iam_policy(resource, policy) - - def test_test_iam_permissions(self): - # Setup Expected Response - expected_response = {} - expected_response = iam_policy_pb2.TestIamPermissionsResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - resource = "resource-341064690" - permissions = [] - - response = client.test_iam_permissions(resource, permissions) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = iam_policy_pb2.TestIamPermissionsRequest( - resource=resource, permissions=permissions - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_test_iam_permissions_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = 
mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - resource = "resource-341064690" - permissions = [] - - with pytest.raises(CustomException): - client.test_iam_permissions(resource, permissions) diff --git a/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py b/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py deleted file mode 100644 index 2ca0a34f9..000000000 --- a/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py +++ /dev/null @@ -1,1049 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Unit tests.""" - -import mock -import pytest - -from google.rpc import status_pb2 - -from google.cloud import bigtable_admin_v2 -from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2 -from google.cloud.bigtable_admin_v2.proto import table_pb2 -from google.iam.v1 import iam_policy_pb2 -from google.iam.v1 import policy_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestBigtableTableAdminClient(object): - def test_create_table_from_snapshot(self): - # Setup Expected Response - name = "name3373707" - expected_response = {"name": name} - expected_response = table_pb2.Table(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_create_table_from_snapshot", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = 
bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - table_id = "tableId-895419604" - source_snapshot = client.snapshot_path( - "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" - ) - - response = client.create_table_from_snapshot(parent, table_id, source_snapshot) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.CreateTableFromSnapshotRequest( - parent=parent, table_id=table_id, source_snapshot=source_snapshot - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_table_from_snapshot_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_create_table_from_snapshot_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - table_id = "tableId-895419604" - source_snapshot = client.snapshot_path( - "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" - ) - - response = client.create_table_from_snapshot(parent, table_id, source_snapshot) - exception = response.exception() - assert exception.errors[0] == error - - def test_snapshot_table(self): - # Setup Expected Response - name_2 = "name2-1052831874" - data_size_bytes = 2110122398 - description = "description-1724546052" - expected_response = { - "name": name_2, - "data_size_bytes": data_size_bytes, - "description": description, - } - expected_response = table_pb2.Snapshot(**expected_response) - operation = operations_pb2.Operation( - 
name="operations/test_snapshot_table", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - cluster = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - snapshot_id = "snapshotId-168585866" - - response = client.snapshot_table(name, cluster, snapshot_id) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.SnapshotTableRequest( - name=name, cluster=cluster, snapshot_id=snapshot_id - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_snapshot_table_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_snapshot_table_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - cluster = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - snapshot_id = "snapshotId-168585866" - - response = client.snapshot_table(name, cluster, snapshot_id) - exception = response.exception() - assert exception.errors[0] == error - - def test_create_backup(self): - # Setup Expected Response - name = "name3373707" - source_table = "sourceTable1670858410" - size_bytes = 1796325715 - expected_response = { - "name": name, - "source_table": 
source_table, - "size_bytes": size_bytes, - } - expected_response = table_pb2.Backup(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_create_backup", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - backup_id = "backupId1355353272" - backup = {} - - response = client.create_backup(parent, backup_id, backup) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.CreateBackupRequest( - parent=parent, backup_id=backup_id, backup=backup - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_backup_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_create_backup_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - backup_id = "backupId1355353272" - backup = {} - - response = client.create_backup(parent, backup_id, backup) - exception = response.exception() - assert exception.errors[0] == error - - def test_restore_table(self): - # Setup Expected Response - name = "name3373707" - expected_response = {"name": name} - expected_response = table_pb2.Table(**expected_response) - 
operation = operations_pb2.Operation( - name="operations/test_restore_table", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - table_id = "tableId-895419604" - - response = client.restore_table(parent, table_id) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.RestoreTableRequest( - parent=parent, table_id=table_id - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_restore_table_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_restore_table_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - table_id = "tableId-895419604" - - response = client.restore_table(parent, table_id) - exception = response.exception() - assert exception.errors[0] == error - - def test_create_table(self): - # Setup Expected Response - name = "name3373707" - expected_response = {"name": name} - expected_response = table_pb2.Table(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - 
create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - table_id = "tableId-895419604" - table = {} - - response = client.create_table(parent, table_id, table) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.CreateTableRequest( - parent=parent, table_id=table_id, table=table - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_table_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - table_id = "tableId-895419604" - table = {} - - with pytest.raises(CustomException): - client.create_table(parent, table_id, table) - - def test_list_tables(self): - # Setup Expected Response - next_page_token = "" - tables_element = {} - tables = [tables_element] - expected_response = {"next_page_token": next_page_token, "tables": tables} - expected_response = bigtable_table_admin_pb2.ListTablesResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - - paged_list_response = client.list_tables(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.tables[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = 
bigtable_table_admin_pb2.ListTablesRequest(parent=parent) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_tables_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - - paged_list_response = client.list_tables(parent) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_get_table(self): - # Setup Expected Response - name_2 = "name2-1052831874" - expected_response = {"name": name_2} - expected_response = table_pb2.Table(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - response = client.get_table(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.GetTableRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_table_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - with pytest.raises(CustomException): - client.get_table(name) - - def test_delete_table(self): - channel = ChannelStub() - patch = 
mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - client.delete_table(name) - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.DeleteTableRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_table_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - with pytest.raises(CustomException): - client.delete_table(name) - - def test_modify_column_families(self): - # Setup Expected Response - name_2 = "name2-1052831874" - expected_response = {"name": name_2} - expected_response = table_pb2.Table(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - modifications = [] - - response = client.modify_column_families(name, modifications) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.ModifyColumnFamiliesRequest( - name=name, modifications=modifications - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_modify_column_families_exception(self): - # Mock the API response - channel = 
ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - modifications = [] - - with pytest.raises(CustomException): - client.modify_column_families(name, modifications) - - def test_drop_row_range(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - client.drop_row_range(name) - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.DropRowRangeRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_drop_row_range_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - with pytest.raises(CustomException): - client.drop_row_range(name) - - def test_generate_consistency_token(self): - # Setup Expected Response - consistency_token = "consistencyToken-1090516718" - expected_response = {"consistency_token": consistency_token} - expected_response = bigtable_table_admin_pb2.GenerateConsistencyTokenResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - 
create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - response = client.generate_consistency_token(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.GenerateConsistencyTokenRequest( - name=name - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_generate_consistency_token_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - with pytest.raises(CustomException): - client.generate_consistency_token(name) - - def test_check_consistency(self): - # Setup Expected Response - consistent = True - expected_response = {"consistent": consistent} - expected_response = bigtable_table_admin_pb2.CheckConsistencyResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - consistency_token = "consistencyToken-1090516718" - - response = client.check_consistency(name, consistency_token) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.CheckConsistencyRequest( - name=name, consistency_token=consistency_token - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - 
def test_check_consistency_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - consistency_token = "consistencyToken-1090516718" - - with pytest.raises(CustomException): - client.check_consistency(name, consistency_token) - - def test_get_snapshot(self): - # Setup Expected Response - name_2 = "name2-1052831874" - data_size_bytes = 2110122398 - description = "description-1724546052" - expected_response = { - "name": name_2, - "data_size_bytes": data_size_bytes, - "description": description, - } - expected_response = table_pb2.Snapshot(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.snapshot_path( - "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" - ) - - response = client.get_snapshot(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.GetSnapshotRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_snapshot_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.snapshot_path( - "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" - ) - - with 
pytest.raises(CustomException): - client.get_snapshot(name) - - def test_list_snapshots(self): - # Setup Expected Response - next_page_token = "" - snapshots_element = {} - snapshots = [snapshots_element] - expected_response = {"next_page_token": next_page_token, "snapshots": snapshots} - expected_response = bigtable_table_admin_pb2.ListSnapshotsResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - - paged_list_response = client.list_snapshots(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.snapshots[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.ListSnapshotsRequest(parent=parent) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_snapshots_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - - paged_list_response = client.list_snapshots(parent) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_delete_snapshot(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.snapshot_path( - "[PROJECT]", "[INSTANCE]", 
"[CLUSTER]", "[SNAPSHOT]" - ) - - client.delete_snapshot(name) - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.DeleteSnapshotRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_snapshot_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.snapshot_path( - "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" - ) - - with pytest.raises(CustomException): - client.delete_snapshot(name) - - def test_get_backup(self): - # Setup Expected Response - name_2 = "name2-1052831874" - source_table = "sourceTable1670858410" - size_bytes = 1796325715 - expected_response = { - "name": name_2, - "source_table": source_table, - "size_bytes": size_bytes, - } - expected_response = table_pb2.Backup(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.backup_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[BACKUP]") - - response = client.get_backup(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.GetBackupRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_backup_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value 
= channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.backup_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[BACKUP]") - - with pytest.raises(CustomException): - client.get_backup(name) - - def test_update_backup(self): - # Setup Expected Response - name = "name3373707" - source_table = "sourceTable1670858410" - size_bytes = 1796325715 - expected_response = { - "name": name, - "source_table": source_table, - "size_bytes": size_bytes, - } - expected_response = table_pb2.Backup(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - backup = {} - update_mask = {} - - response = client.update_backup(backup, update_mask) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.UpdateBackupRequest( - backup=backup, update_mask=update_mask - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_backup_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - backup = {} - update_mask = {} - - with pytest.raises(CustomException): - client.update_backup(backup, update_mask) - - def test_delete_backup(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.backup_path("[PROJECT]", 
"[INSTANCE]", "[CLUSTER]", "[BACKUP]") - - client.delete_backup(name) - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.DeleteBackupRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_backup_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.backup_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[BACKUP]") - - with pytest.raises(CustomException): - client.delete_backup(name) - - def test_list_backups(self): - # Setup Expected Response - next_page_token = "" - backups_element = {} - backups = [backups_element] - expected_response = {"next_page_token": next_page_token, "backups": backups} - expected_response = bigtable_table_admin_pb2.ListBackupsResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - - paged_list_response = client.list_backups(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.backups[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.ListBackupsRequest(parent=parent) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_backups_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with 
patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - - paged_list_response = client.list_backups(parent) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_get_iam_policy(self): - # Setup Expected Response - version = 351608024 - etag = b"21" - expected_response = {"version": version, "etag": etag} - expected_response = policy_pb2.Policy(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - resource = "resource-341064690" - - response = client.get_iam_policy(resource) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = iam_policy_pb2.GetIamPolicyRequest(resource=resource) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_iam_policy_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - resource = "resource-341064690" - - with pytest.raises(CustomException): - client.get_iam_policy(resource) - - def test_set_iam_policy(self): - # Setup Expected Response - version = 351608024 - etag = b"21" - expected_response = {"version": version, "etag": etag} - expected_response = policy_pb2.Policy(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with 
patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - resource = "resource-341064690" - policy = {} - - response = client.set_iam_policy(resource, policy) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = iam_policy_pb2.SetIamPolicyRequest( - resource=resource, policy=policy - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_set_iam_policy_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - resource = "resource-341064690" - policy = {} - - with pytest.raises(CustomException): - client.set_iam_policy(resource, policy) - - def test_test_iam_permissions(self): - # Setup Expected Response - expected_response = {} - expected_response = iam_policy_pb2.TestIamPermissionsResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - resource = "resource-341064690" - permissions = [] - - response = client.test_iam_permissions(resource, permissions) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = iam_policy_pb2.TestIamPermissionsRequest( - resource=resource, permissions=permissions - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_test_iam_permissions_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = 
mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - resource = "resource-341064690" - permissions = [] - - with pytest.raises(CustomException): - client.test_iam_permissions(resource, permissions) From 03c2a7f4d0ab74186a9deaf14835c9ac8b59e6da Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Sat, 13 Feb 2021 06:20:10 -0800 Subject: [PATCH 11/11] build: add BUILD files to generate client library for orgpolicy PiperOrigin-RevId: 357293749 Source-Author: Google APIs Source-Date: Fri Feb 12 16:54:43 2021 -0800 Source-Repo: googleapis/googleapis Source-Sha: 78f5365656fda2855cf0546080da559d6c2ed3ff Source-Link: https://github.com/googleapis/googleapis/commit/78f5365656fda2855cf0546080da559d6c2ed3ff --- google/cloud/bigtable_admin_v2/__init__.py | 4 ++-- synth.metadata | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/google/cloud/bigtable_admin_v2/__init__.py b/google/cloud/bigtable_admin_v2/__init__.py index cf8e394ef..423742502 100644 --- a/google/cloud/bigtable_admin_v2/__init__.py +++ b/google/cloud/bigtable_admin_v2/__init__.py @@ -87,7 +87,7 @@ "AppProfile", "Backup", "BackupInfo", - "BigtableTableAdminClient", + "BigtableInstanceAdminClient", "CheckConsistencyRequest", "CheckConsistencyResponse", "Cluster", @@ -149,5 +149,5 @@ "UpdateBackupRequest", "UpdateClusterMetadata", "UpdateInstanceMetadata", - "BigtableInstanceAdminClient", + "BigtableTableAdminClient", ) diff --git a/synth.metadata b/synth.metadata index 0acc26a69..4a5ed7b92 100644 --- a/synth.metadata +++ b/synth.metadata @@ -11,8 +11,8 @@ "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "1e0c7413684ca6f6322620ecfc0d3e0352933dc1", - "internalRef": "356992836" + "sha": "78f5365656fda2855cf0546080da559d6c2ed3ff", + "internalRef": "357293749" } }, {