diff --git a/google/cloud/spanner_admin_database_v1/__init__.py b/google/cloud/spanner_admin_database_v1/__init__.py index 74715d1e44..d81a0e2dcc 100644 --- a/google/cloud/spanner_admin_database_v1/__init__.py +++ b/google/cloud/spanner_admin_database_v1/__init__.py @@ -32,6 +32,7 @@ from .types.backup import DeleteBackupRequest from .types.backup import FullBackupSpec from .types.backup import GetBackupRequest +from .types.backup import IncrementalBackupSpec from .types.backup import ListBackupOperationsRequest from .types.backup import ListBackupOperationsResponse from .types.backup import ListBackupsRequest @@ -108,6 +109,7 @@ "GetDatabaseDdlRequest", "GetDatabaseDdlResponse", "GetDatabaseRequest", + "IncrementalBackupSpec", "ListBackupOperationsRequest", "ListBackupOperationsResponse", "ListBackupSchedulesRequest", diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py b/google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py index 083aebcd42..d714d52311 100644 --- a/google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py +++ b/google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py @@ -14,7 +14,6 @@ # limitations under the License. # from collections import OrderedDict -import functools import re from typing import ( Dict, @@ -230,9 +229,7 @@ def universe_domain(self) -> str: """ return self._client._universe_domain - get_transport_class = functools.partial( - type(DatabaseAdminClient).get_transport_class, type(DatabaseAdminClient) - ) + get_transport_class = DatabaseAdminClient.get_transport_class def __init__( self, diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/client.py b/google/cloud/spanner_admin_database_v1/services/database_admin/client.py index 9bdd254fb5..0a68cb2e44 100644 --- a/google/cloud/spanner_admin_database_v1/services/database_admin/client.py +++ b/google/cloud/spanner_admin_database_v1/services/database_admin/client.py @@ -819,7 +819,7 @@ def __init__( transport_init: Union[ Type[DatabaseAdminTransport], Callable[..., DatabaseAdminTransport] ] = ( - type(self).get_transport_class(transport) + DatabaseAdminClient.get_transport_class(transport) if isinstance(transport, str) or transport is None else cast(Callable[..., DatabaseAdminTransport], transport) ) diff --git a/google/cloud/spanner_admin_database_v1/types/__init__.py b/google/cloud/spanner_admin_database_v1/types/__init__.py index 2743a7be51..9a9515e9b2 100644 --- a/google/cloud/spanner_admin_database_v1/types/__init__.py +++ b/google/cloud/spanner_admin_database_v1/types/__init__.py @@ -25,6 +25,7 @@ DeleteBackupRequest, FullBackupSpec, GetBackupRequest, + IncrementalBackupSpec, ListBackupOperationsRequest, ListBackupOperationsResponse, ListBackupsRequest, @@ -88,6 +89,7 @@ "DeleteBackupRequest", "FullBackupSpec", "GetBackupRequest", + "IncrementalBackupSpec", "ListBackupOperationsRequest", "ListBackupOperationsResponse", "ListBackupsRequest", diff --git a/google/cloud/spanner_admin_database_v1/types/backup.py b/google/cloud/spanner_admin_database_v1/types/backup.py index 156f16f114..0c220c3953 100644 --- a/google/cloud/spanner_admin_database_v1/types/backup.py +++ b/google/cloud/spanner_admin_database_v1/types/backup.py @@ -44,6 +44,7 @@ "CreateBackupEncryptionConfig", "CopyBackupEncryptionConfig", "FullBackupSpec", + "IncrementalBackupSpec", }, ) @@ -98,6 +99,30 @@ class Backup(proto.Message): equivalent to the ``create_time``. size_bytes (int): Output only. 
Size of the backup in bytes. + freeable_size_bytes (int): + Output only. The number of bytes that will be + freed by deleting this backup. This value will + be zero if, for example, this backup is part of + an incremental backup chain and younger backups + in the chain require that we keep its data. For + backups not in an incremental backup chain, this + is always the size of the backup. This value may + change if backups on the same chain get created, + deleted or expired. + exclusive_size_bytes (int): + Output only. For a backup in an incremental + backup chain, this is the storage space needed + to keep the data that has changed since the + previous backup. For all other backups, this is + always the size of the backup. This value may + change if backups on the same chain get deleted + or expired. + + This field can be used to calculate the total + storage space used by a set of backups. For + example, the total space used by all backups of + a database can be computed by summing up this + field. state (google.cloud.spanner_admin_database_v1.types.Backup.State): Output only. The current state of the backup. referencing_databases (MutableSequence[str]): @@ -156,6 +181,24 @@ class Backup(proto.Message): If collapsing is not done, then this field captures the single backup schedule URI associated with creating this backup. + incremental_backup_chain_id (str): + Output only. Populated only for backups in an incremental + backup chain. Backups share the same chain id if and only if + they belong to the same incremental backup chain. Use this + field to determine which backups are part of the same + incremental backup chain. The ordering of backups in the + chain can be determined by ordering the backup + ``version_time``. + oldest_version_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Data deleted at a time older + than this is guaranteed not to be retained in + order to support this backup. For a backup in an + incremental backup chain, this is the version + time of the oldest backup that exists or ever + existed in the chain. For all other backups, + this is the version time of the backup. This + field can be used to understand what data is + being retained by the backup system. """ class State(proto.Enum): @@ -201,6 +244,14 @@ class State(proto.Enum): proto.INT64, number=5, ) + freeable_size_bytes: int = proto.Field( + proto.INT64, + number=15, + ) + exclusive_size_bytes: int = proto.Field( + proto.INT64, + number=16, + ) state: State = proto.Field( proto.ENUM, number=6, @@ -240,6 +291,15 @@ class State(proto.Enum): proto.STRING, number=14, ) + incremental_backup_chain_id: str = proto.Field( + proto.STRING, + number=17, + ) + oldest_version_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=18, + message=timestamp_pb2.Timestamp, + ) class CreateBackupRequest(proto.Message): @@ -553,6 +613,7 @@ class ListBackupsRequest(proto.Message): - ``version_time`` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) - ``size_bytes`` + - ``backup_schedules`` You can combine multiple expressions by enclosing each expression in parentheses. By default, expressions are @@ -576,6 +637,8 @@ class ListBackupsRequest(proto.Message): ``expire_time`` is before 2018-03-28T14:50:00Z. - ``size_bytes > 10000000000`` - The backup's size is greater than 10GB + - ``backup_schedules:daily`` - The backup is created from a + schedule with "daily" in its name. page_size (int): Number of backups to be returned in the response. 
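
For illustration, the new ``exclusive_size_bytes`` field and the ``backup_schedules`` filter documented above can be combined for storage accounting across a database's backups. The sketch below is not part of this change and only assumes the documented surface; the project, instance, and schedule names are placeholders.

.. code-block:: python

    from google.cloud import spanner_admin_database_v1

    def total_backup_storage():
        # Sketch only: "my-project"/"my-instance" and the "daily" schedule
        # name fragment are hypothetical.
        client = spanner_admin_database_v1.DatabaseAdminClient()

        request = spanner_admin_database_v1.ListBackupsRequest(
            parent="projects/my-project/instances/my-instance",
            filter="backup_schedules:daily",
        )

        # Per the field documentation above, summing exclusive_size_bytes over a
        # set of backups yields the total storage space they use.
        total = sum(
            backup.exclusive_size_bytes for backup in client.list_backups(request=request)
        )
        print(f"Total storage used: {total} bytes")
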
If 0 or less, defaults to the server's @@ -999,4 +1062,15 @@ class FullBackupSpec(proto.Message): """ +class IncrementalBackupSpec(proto.Message): + r"""The specification for incremental backup chains. + An incremental backup stores the delta of changes between a + previous backup and the database contents at a given version + time. An incremental backup chain consists of a full backup and + zero or more successive incremental backups. The first backup + created for an incremental backup chain is always a full backup. + + """ + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/spanner_admin_database_v1/types/backup_schedule.py b/google/cloud/spanner_admin_database_v1/types/backup_schedule.py index 14ea180bc3..ad9a7ddaf2 100644 --- a/google/cloud/spanner_admin_database_v1/types/backup_schedule.py +++ b/google/cloud/spanner_admin_database_v1/types/backup_schedule.py @@ -66,6 +66,10 @@ class BackupSchedule(proto.Message): specification for a Spanner database. Next ID: 10 + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields @@ -96,6 +100,11 @@ class BackupSchedule(proto.Message): full_backup_spec (google.cloud.spanner_admin_database_v1.types.FullBackupSpec): The schedule creates only full backups. + This field is a member of `oneof`_ ``backup_type_spec``. + incremental_backup_spec (google.cloud.spanner_admin_database_v1.types.IncrementalBackupSpec): + The schedule creates incremental backup + chains. + This field is a member of `oneof`_ ``backup_type_spec``. update_time (google.protobuf.timestamp_pb2.Timestamp): Output only. 
The timestamp at which the @@ -129,6 +138,12 @@ class BackupSchedule(proto.Message): oneof="backup_type_spec", message=backup.FullBackupSpec, ) + incremental_backup_spec: backup.IncrementalBackupSpec = proto.Field( + proto.MESSAGE, + number=8, + oneof="backup_type_spec", + message=backup.IncrementalBackupSpec, + ) update_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=9, diff --git a/google/cloud/spanner_admin_instance_v1/__init__.py b/google/cloud/spanner_admin_instance_v1/__init__.py index bf71662118..5d0cad98e8 100644 --- a/google/cloud/spanner_admin_instance_v1/__init__.py +++ b/google/cloud/spanner_admin_instance_v1/__init__.py @@ -49,6 +49,9 @@ from .types.spanner_instance_admin import ListInstancePartitionsResponse from .types.spanner_instance_admin import ListInstancesRequest from .types.spanner_instance_admin import ListInstancesResponse +from .types.spanner_instance_admin import MoveInstanceMetadata +from .types.spanner_instance_admin import MoveInstanceRequest +from .types.spanner_instance_admin import MoveInstanceResponse from .types.spanner_instance_admin import ReplicaInfo from .types.spanner_instance_admin import UpdateInstanceConfigMetadata from .types.spanner_instance_admin import UpdateInstanceConfigRequest @@ -87,6 +90,9 @@ "ListInstancePartitionsResponse", "ListInstancesRequest", "ListInstancesResponse", + "MoveInstanceMetadata", + "MoveInstanceRequest", + "MoveInstanceResponse", "OperationProgress", "ReplicaInfo", "UpdateInstanceConfigMetadata", diff --git a/google/cloud/spanner_admin_instance_v1/gapic_metadata.json b/google/cloud/spanner_admin_instance_v1/gapic_metadata.json index 361a5807c8..60fa46718a 100644 --- a/google/cloud/spanner_admin_instance_v1/gapic_metadata.json +++ b/google/cloud/spanner_admin_instance_v1/gapic_metadata.json @@ -85,6 +85,11 @@ "list_instances" ] }, + "MoveInstance": { + "methods": [ + "move_instance" + ] + }, "SetIamPolicy": { "methods": [ "set_iam_policy" @@ -190,6 +195,11 @@ "list_instances" ] }, + "MoveInstance": { + "methods": [ + "move_instance" + ] + }, "SetIamPolicy": { "methods": [ "set_iam_policy" @@ -295,6 +305,11 @@ "list_instances" ] }, + "MoveInstance": { + "methods": [ + "move_instance" + ] + }, "SetIamPolicy": { "methods": [ "set_iam_policy" diff --git a/google/cloud/spanner_admin_instance_v1/services/instance_admin/async_client.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/async_client.py index 4b823c48ce..045e5c377a 100644 --- a/google/cloud/spanner_admin_instance_v1/services/instance_admin/async_client.py +++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/async_client.py @@ -14,7 +14,6 @@ # limitations under the License. # from collections import OrderedDict -import functools import re from typing import ( Dict, @@ -225,9 +224,7 @@ def universe_domain(self) -> str: """ return self._client._universe_domain - get_transport_class = functools.partial( - type(InstanceAdminClient).get_transport_class, type(InstanceAdminClient) - ) + get_transport_class = InstanceAdminClient.get_transport_class def __init__( self, @@ -545,39 +542,39 @@ async def create_instance_config( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: - r"""Creates an instance config and begins preparing it to be used. - The returned [long-running + r"""Creates an instance configuration and begins preparing it to be + used. 
The returned [long-running operation][google.longrunning.Operation] can be used to track - the progress of preparing the new instance config. The instance - config name is assigned by the caller. If the named instance - config already exists, ``CreateInstanceConfig`` returns - ``ALREADY_EXISTS``. + the progress of preparing the new instance configuration. The + instance configuration name is assigned by the caller. If the + named instance configuration already exists, + ``CreateInstanceConfig`` returns ``ALREADY_EXISTS``. Immediately after the request returns: - - The instance config is readable via the API, with all - requested attributes. The instance config's + - The instance configuration is readable via the API, with all + requested attributes. The instance configuration's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field is set to true. Its state is ``CREATING``. While the operation is pending: - - Cancelling the operation renders the instance config + - Cancelling the operation renders the instance configuration immediately unreadable via the API. - Except for deleting the creating resource, all other attempts - to modify the instance config are rejected. + to modify the instance configuration are rejected. Upon completion of the returned operation: - Instances can be created using the instance configuration. - - The instance config's + - The instance configuration's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field becomes false. Its state becomes ``READY``. The returned [long-running operation][google.longrunning.Operation] will have a name of the format ``/operations/`` and - can be used to track creation of the instance config. The + can be used to track creation of the instance configuration. The [metadata][google.longrunning.Operation.metadata] field type is [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]. The [response][google.longrunning.Operation.response] field type @@ -626,7 +623,7 @@ async def sample_create_instance_config(): [CreateInstanceConfigRequest][InstanceAdmin.CreateInstanceConfigRequest]. parent (:class:`str`): Required. The name of the project in which to create the - instance config. Values are of the form + instance configuration. Values are of the form ``projects/``. This corresponds to the ``parent`` field @@ -644,11 +641,11 @@ async def sample_create_instance_config(): on the ``request`` instance; if ``request`` is provided, this should not be set. instance_config_id (:class:`str`): - Required. The ID of the instance config to create. Valid - identifiers are of the form + Required. The ID of the instance configuration to + create. Valid identifiers are of the form ``custom-[-a-z0-9]*[a-z0-9]`` and must be between 2 and 64 characters in length. The ``custom-`` prefix is - required to avoid name conflicts with Google managed + required to avoid name conflicts with Google-managed configurations. This corresponds to the ``instance_config_id`` field @@ -739,16 +736,16 @@ async def update_instance_config( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: - r"""Updates an instance config. The returned [long-running + r"""Updates an instance configuration. The returned [long-running operation][google.longrunning.Operation] can be used to track the progress of updating the instance. If the named instance - config does not exist, returns ``NOT_FOUND``. 
+ configuration does not exist, returns ``NOT_FOUND``. - Only user managed configurations can be updated. + Only user-managed configurations can be updated. Immediately after the request returns: - - The instance config's + - The instance configuration's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field is set to true. @@ -759,25 +756,27 @@ async def update_instance_config( The operation is guaranteed to succeed at undoing all changes, after which point it terminates with a ``CANCELLED`` status. - - All other attempts to modify the instance config are + - All other attempts to modify the instance configuration are rejected. - - Reading the instance config via the API continues to give the - pre-request values. + - Reading the instance configuration via the API continues to + give the pre-request values. Upon completion of the returned operation: - Creating instances using the instance configuration uses the new values. - - The instance config's new values are readable via the API. - - The instance config's + - The new values of the instance configuration are readable via + the API. + - The instance configuration's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field becomes false. The returned [long-running operation][google.longrunning.Operation] will have a name of the format ``/operations/`` and - can be used to track the instance config modification. The - [metadata][google.longrunning.Operation.metadata] field type is + can be used to track the instance configuration modification. + The [metadata][google.longrunning.Operation.metadata] field type + is [UpdateInstanceConfigMetadata][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata]. The [response][google.longrunning.Operation.response] field type is @@ -822,9 +821,9 @@ async def sample_update_instance_config(): The request object. The request for [UpdateInstanceConfigRequest][InstanceAdmin.UpdateInstanceConfigRequest]. instance_config (:class:`google.cloud.spanner_admin_instance_v1.types.InstanceConfig`): - Required. The user instance config to update, which must - always include the instance config name. Otherwise, only - fields mentioned in + Required. The user instance configuration to update, + which must always include the instance configuration + name. Otherwise, only fields mentioned in [update_mask][google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.update_mask] need be included. To prevent conflicts of concurrent updates, @@ -931,11 +930,11 @@ async def delete_instance_config( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> None: - r"""Deletes the instance config. Deletion is only allowed when no - instances are using the configuration. If any instances are - using the config, returns ``FAILED_PRECONDITION``. + r"""Deletes the instance configuration. Deletion is only allowed + when no instances are using the configuration. If any instances + are using the configuration, returns ``FAILED_PRECONDITION``. - Only user managed configurations can be deleted. + Only user-managed configurations can be deleted. 
Authorization requires ``spanner.instanceConfigs.delete`` permission on the resource @@ -1036,9 +1035,9 @@ async def list_instance_config_operations( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListInstanceConfigOperationsAsyncPager: - r"""Lists the user-managed instance config [long-running + r"""Lists the user-managed instance configuration [long-running operations][google.longrunning.Operation] in the given project. - An instance config operation has a name of the form + An instance configuration operation has a name of the form ``projects//instanceConfigs//operations/``. The long-running operation [metadata][google.longrunning.Operation.metadata] field type @@ -1081,8 +1080,9 @@ async def sample_list_instance_config_operations(): The request object. The request for [ListInstanceConfigOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigOperations]. parent (:class:`str`): - Required. The project of the instance config operations. - Values are of the form ``projects/``. + Required. The project of the instance configuration + operations. Values are of the form + ``projects/``. This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this @@ -3164,6 +3164,172 @@ async def sample_list_instance_partition_operations(): # Done; return the response. return response + async def move_instance( + self, + request: Optional[ + Union[spanner_instance_admin.MoveInstanceRequest, dict] + ] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Moves an instance to the target instance configuration. You can + use the returned [long-running + operation][google.longrunning.Operation] to track the progress + of moving the instance. + + ``MoveInstance`` returns ``FAILED_PRECONDITION`` if the instance + meets any of the following criteria: + + - Is undergoing a move to a different instance configuration + - Has backups + - Has an ongoing update + - Contains any CMEK-enabled databases + - Is a free trial instance + + While the operation is pending: + + - All other attempts to modify the instance, including changes + to its compute capacity, are rejected. + + - The following database and backup admin operations are + rejected: + + - ``DatabaseAdmin.CreateDatabase`` + - ``DatabaseAdmin.UpdateDatabaseDdl`` (disabled if + default_leader is specified in the request.) + - ``DatabaseAdmin.RestoreDatabase`` + - ``DatabaseAdmin.CreateBackup`` + - ``DatabaseAdmin.CopyBackup`` + + - Both the source and target instance configurations are + subject to hourly compute and storage charges. + + - The instance might experience higher read-write latencies and + a higher transaction abort rate. However, moving an instance + doesn't cause any downtime. + + The returned [long-running + operation][google.longrunning.Operation] has a name of the + format ``/operations/`` and can be + used to track the move instance operation. The + [metadata][google.longrunning.Operation.metadata] field type is + [MoveInstanceMetadata][google.spanner.admin.instance.v1.MoveInstanceMetadata]. + The [response][google.longrunning.Operation.response] field type + is [Instance][google.spanner.admin.instance.v1.Instance], if + successful. Cancelling the operation sets its metadata's + [cancel_time][google.spanner.admin.instance.v1.MoveInstanceMetadata.cancel_time]. 
+ Cancellation is not immediate because it involves moving any + data previously moved to the target instance configuration back + to the original instance configuration. You can use this + operation to track the progress of the cancellation. Upon + successful completion of the cancellation, the operation + terminates with ``CANCELLED`` status. + + If not cancelled, upon completion of the returned operation: + + - The instance successfully moves to the target instance + configuration. + - You are billed for compute and storage in target instance + configuration. + + Authorization requires the ``spanner.instances.update`` + permission on the resource + [instance][google.spanner.admin.instance.v1.Instance]. + + For more details, see `Move an + instance `__. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import spanner_admin_instance_v1 + + async def sample_move_instance(): + # Create a client + client = spanner_admin_instance_v1.InstanceAdminAsyncClient() + + # Initialize request argument(s) + request = spanner_admin_instance_v1.MoveInstanceRequest( + name="name_value", + target_config="target_config_value", + ) + + # Make the request + operation = client.move_instance(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.spanner_admin_instance_v1.types.MoveInstanceRequest, dict]]): + The request object. The request for + [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.spanner_admin_instance_v1.types.MoveInstanceResponse` The response for + [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]. + + """ + # Create or coerce a protobuf request object. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, spanner_instance_admin.MoveInstanceRequest): + request = spanner_instance_admin.MoveInstanceRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.move_instance + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + spanner_instance_admin.MoveInstanceResponse, + metadata_type=spanner_instance_admin.MoveInstanceMetadata, + ) + + # Done; return the response. + return response + async def __aenter__(self) -> "InstanceAdminAsyncClient": return self diff --git a/google/cloud/spanner_admin_instance_v1/services/instance_admin/client.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/client.py index d90d1707cd..6d767f7383 100644 --- a/google/cloud/spanner_admin_instance_v1/services/instance_admin/client.py +++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/client.py @@ -724,7 +724,7 @@ def __init__( transport_init: Union[ Type[InstanceAdminTransport], Callable[..., InstanceAdminTransport] ] = ( - type(self).get_transport_class(transport) + InstanceAdminClient.get_transport_class(transport) if isinstance(transport, str) or transport is None else cast(Callable[..., InstanceAdminTransport], transport) ) @@ -985,39 +985,39 @@ def create_instance_config( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: - r"""Creates an instance config and begins preparing it to be used. - The returned [long-running + r"""Creates an instance configuration and begins preparing it to be + used. The returned [long-running operation][google.longrunning.Operation] can be used to track - the progress of preparing the new instance config. The instance - config name is assigned by the caller. If the named instance - config already exists, ``CreateInstanceConfig`` returns - ``ALREADY_EXISTS``. + the progress of preparing the new instance configuration. The + instance configuration name is assigned by the caller. If the + named instance configuration already exists, + ``CreateInstanceConfig`` returns ``ALREADY_EXISTS``. Immediately after the request returns: - - The instance config is readable via the API, with all - requested attributes. The instance config's + - The instance configuration is readable via the API, with all + requested attributes. The instance configuration's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field is set to true. Its state is ``CREATING``. While the operation is pending: - - Cancelling the operation renders the instance config + - Cancelling the operation renders the instance configuration immediately unreadable via the API. - Except for deleting the creating resource, all other attempts - to modify the instance config are rejected. + to modify the instance configuration are rejected. Upon completion of the returned operation: - Instances can be created using the instance configuration. - - The instance config's + - The instance configuration's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field becomes false. Its state becomes ``READY``. The returned [long-running operation][google.longrunning.Operation] will have a name of the format ``/operations/`` and - can be used to track creation of the instance config. The + can be used to track creation of the instance configuration. The [metadata][google.longrunning.Operation.metadata] field type is [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]. The [response][google.longrunning.Operation.response] field type @@ -1066,7 +1066,7 @@ def sample_create_instance_config(): [CreateInstanceConfigRequest][InstanceAdmin.CreateInstanceConfigRequest]. 
parent (str): Required. The name of the project in which to create the - instance config. Values are of the form + instance configuration. Values are of the form ``projects/``. This corresponds to the ``parent`` field @@ -1084,11 +1084,11 @@ def sample_create_instance_config(): on the ``request`` instance; if ``request`` is provided, this should not be set. instance_config_id (str): - Required. The ID of the instance config to create. Valid - identifiers are of the form + Required. The ID of the instance configuration to + create. Valid identifiers are of the form ``custom-[-a-z0-9]*[a-z0-9]`` and must be between 2 and 64 characters in length. The ``custom-`` prefix is - required to avoid name conflicts with Google managed + required to avoid name conflicts with Google-managed configurations. This corresponds to the ``instance_config_id`` field @@ -1176,16 +1176,16 @@ def update_instance_config( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: - r"""Updates an instance config. The returned [long-running + r"""Updates an instance configuration. The returned [long-running operation][google.longrunning.Operation] can be used to track the progress of updating the instance. If the named instance - config does not exist, returns ``NOT_FOUND``. + configuration does not exist, returns ``NOT_FOUND``. - Only user managed configurations can be updated. + Only user-managed configurations can be updated. Immediately after the request returns: - - The instance config's + - The instance configuration's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field is set to true. @@ -1196,25 +1196,27 @@ def update_instance_config( The operation is guaranteed to succeed at undoing all changes, after which point it terminates with a ``CANCELLED`` status. - - All other attempts to modify the instance config are + - All other attempts to modify the instance configuration are rejected. - - Reading the instance config via the API continues to give the - pre-request values. + - Reading the instance configuration via the API continues to + give the pre-request values. Upon completion of the returned operation: - Creating instances using the instance configuration uses the new values. - - The instance config's new values are readable via the API. - - The instance config's + - The new values of the instance configuration are readable via + the API. + - The instance configuration's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field becomes false. The returned [long-running operation][google.longrunning.Operation] will have a name of the format ``/operations/`` and - can be used to track the instance config modification. The - [metadata][google.longrunning.Operation.metadata] field type is + can be used to track the instance configuration modification. + The [metadata][google.longrunning.Operation.metadata] field type + is [UpdateInstanceConfigMetadata][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata]. The [response][google.longrunning.Operation.response] field type is @@ -1259,9 +1261,9 @@ def sample_update_instance_config(): The request object. The request for [UpdateInstanceConfigRequest][InstanceAdmin.UpdateInstanceConfigRequest]. instance_config (google.cloud.spanner_admin_instance_v1.types.InstanceConfig): - Required. The user instance config to update, which must - always include the instance config name. Otherwise, only - fields mentioned in + Required. 
The user instance configuration to update, + which must always include the instance configuration + name. Otherwise, only fields mentioned in [update_mask][google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.update_mask] need be included. To prevent conflicts of concurrent updates, @@ -1365,11 +1367,11 @@ def delete_instance_config( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> None: - r"""Deletes the instance config. Deletion is only allowed when no - instances are using the configuration. If any instances are - using the config, returns ``FAILED_PRECONDITION``. + r"""Deletes the instance configuration. Deletion is only allowed + when no instances are using the configuration. If any instances + are using the configuration, returns ``FAILED_PRECONDITION``. - Only user managed configurations can be deleted. + Only user-managed configurations can be deleted. Authorization requires ``spanner.instanceConfigs.delete`` permission on the resource @@ -1467,9 +1469,9 @@ def list_instance_config_operations( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListInstanceConfigOperationsPager: - r"""Lists the user-managed instance config [long-running + r"""Lists the user-managed instance configuration [long-running operations][google.longrunning.Operation] in the given project. - An instance config operation has a name of the form + An instance configuration operation has a name of the form ``projects//instanceConfigs//operations/``. The long-running operation [metadata][google.longrunning.Operation.metadata] field type @@ -1512,8 +1514,9 @@ def sample_list_instance_config_operations(): The request object. The request for [ListInstanceConfigOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigOperations]. parent (str): - Required. The project of the instance config operations. - Values are of the form ``projects/``. + Required. The project of the instance configuration + operations. Values are of the form + ``projects/``. This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this @@ -3572,6 +3575,170 @@ def sample_list_instance_partition_operations(): # Done; return the response. return response + def move_instance( + self, + request: Optional[ + Union[spanner_instance_admin.MoveInstanceRequest, dict] + ] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Moves an instance to the target instance configuration. You can + use the returned [long-running + operation][google.longrunning.Operation] to track the progress + of moving the instance. + + ``MoveInstance`` returns ``FAILED_PRECONDITION`` if the instance + meets any of the following criteria: + + - Is undergoing a move to a different instance configuration + - Has backups + - Has an ongoing update + - Contains any CMEK-enabled databases + - Is a free trial instance + + While the operation is pending: + + - All other attempts to modify the instance, including changes + to its compute capacity, are rejected. + + - The following database and backup admin operations are + rejected: + + - ``DatabaseAdmin.CreateDatabase`` + - ``DatabaseAdmin.UpdateDatabaseDdl`` (disabled if + default_leader is specified in the request.) 
+ - ``DatabaseAdmin.RestoreDatabase`` + - ``DatabaseAdmin.CreateBackup`` + - ``DatabaseAdmin.CopyBackup`` + + - Both the source and target instance configurations are + subject to hourly compute and storage charges. + + - The instance might experience higher read-write latencies and + a higher transaction abort rate. However, moving an instance + doesn't cause any downtime. + + The returned [long-running + operation][google.longrunning.Operation] has a name of the + format ``/operations/`` and can be + used to track the move instance operation. The + [metadata][google.longrunning.Operation.metadata] field type is + [MoveInstanceMetadata][google.spanner.admin.instance.v1.MoveInstanceMetadata]. + The [response][google.longrunning.Operation.response] field type + is [Instance][google.spanner.admin.instance.v1.Instance], if + successful. Cancelling the operation sets its metadata's + [cancel_time][google.spanner.admin.instance.v1.MoveInstanceMetadata.cancel_time]. + Cancellation is not immediate because it involves moving any + data previously moved to the target instance configuration back + to the original instance configuration. You can use this + operation to track the progress of the cancellation. Upon + successful completion of the cancellation, the operation + terminates with ``CANCELLED`` status. + + If not cancelled, upon completion of the returned operation: + + - The instance successfully moves to the target instance + configuration. + - You are billed for compute and storage in target instance + configuration. + + Authorization requires the ``spanner.instances.update`` + permission on the resource + [instance][google.spanner.admin.instance.v1.Instance]. + + For more details, see `Move an + instance `__. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import spanner_admin_instance_v1 + + def sample_move_instance(): + # Create a client + client = spanner_admin_instance_v1.InstanceAdminClient() + + # Initialize request argument(s) + request = spanner_admin_instance_v1.MoveInstanceRequest( + name="name_value", + target_config="target_config_value", + ) + + # Make the request + operation = client.move_instance(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.spanner_admin_instance_v1.types.MoveInstanceRequest, dict]): + The request object. The request for + [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.spanner_admin_instance_v1.types.MoveInstanceResponse` The response for + [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]. + + """ + # Create or coerce a protobuf request object. 
+ # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, spanner_instance_admin.MoveInstanceRequest): + request = spanner_instance_admin.MoveInstanceRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.move_instance] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + spanner_instance_admin.MoveInstanceResponse, + metadata_type=spanner_instance_admin.MoveInstanceMetadata, + ) + + # Done; return the response. + return response + def __enter__(self) -> "InstanceAdminClient": return self diff --git a/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/base.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/base.py index ee70ea889a..5f7711559c 100644 --- a/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/base.py +++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/base.py @@ -297,6 +297,11 @@ def _prep_wrapped_messages(self, client_info): default_timeout=None, client_info=client_info, ), + self.move_instance: gapic_v1.method.wrap_method( + self.move_instance, + default_timeout=None, + client_info=client_info, + ), } def close(self): @@ -519,6 +524,15 @@ def list_instance_partition_operations( ]: raise NotImplementedError() + @property + def move_instance( + self, + ) -> Callable[ + [spanner_instance_admin.MoveInstanceRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + @property def kind(self) -> str: raise NotImplementedError() diff --git a/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc.py index 347688dedb..f4c1e97f09 100644 --- a/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc.py +++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc.py @@ -345,39 +345,39 @@ def create_instance_config( ]: r"""Return a callable for the create instance config method over gRPC. - Creates an instance config and begins preparing it to be used. - The returned [long-running + Creates an instance configuration and begins preparing it to be + used. The returned [long-running operation][google.longrunning.Operation] can be used to track - the progress of preparing the new instance config. The instance - config name is assigned by the caller. If the named instance - config already exists, ``CreateInstanceConfig`` returns - ``ALREADY_EXISTS``. + the progress of preparing the new instance configuration. The + instance configuration name is assigned by the caller. If the + named instance configuration already exists, + ``CreateInstanceConfig`` returns ``ALREADY_EXISTS``. Immediately after the request returns: - - The instance config is readable via the API, with all - requested attributes. 
The instance config's + - The instance configuration is readable via the API, with all + requested attributes. The instance configuration's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field is set to true. Its state is ``CREATING``. While the operation is pending: - - Cancelling the operation renders the instance config + - Cancelling the operation renders the instance configuration immediately unreadable via the API. - Except for deleting the creating resource, all other attempts - to modify the instance config are rejected. + to modify the instance configuration are rejected. Upon completion of the returned operation: - Instances can be created using the instance configuration. - - The instance config's + - The instance configuration's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field becomes false. Its state becomes ``READY``. The returned [long-running operation][google.longrunning.Operation] will have a name of the format ``/operations/`` and - can be used to track creation of the instance config. The + can be used to track creation of the instance configuration. The [metadata][google.longrunning.Operation.metadata] field type is [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]. The [response][google.longrunning.Operation.response] field type @@ -415,16 +415,16 @@ def update_instance_config( ]: r"""Return a callable for the update instance config method over gRPC. - Updates an instance config. The returned [long-running + Updates an instance configuration. The returned [long-running operation][google.longrunning.Operation] can be used to track the progress of updating the instance. If the named instance - config does not exist, returns ``NOT_FOUND``. + configuration does not exist, returns ``NOT_FOUND``. - Only user managed configurations can be updated. + Only user-managed configurations can be updated. Immediately after the request returns: - - The instance config's + - The instance configuration's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field is set to true. @@ -435,25 +435,27 @@ def update_instance_config( The operation is guaranteed to succeed at undoing all changes, after which point it terminates with a ``CANCELLED`` status. - - All other attempts to modify the instance config are + - All other attempts to modify the instance configuration are rejected. - - Reading the instance config via the API continues to give the - pre-request values. + - Reading the instance configuration via the API continues to + give the pre-request values. Upon completion of the returned operation: - Creating instances using the instance configuration uses the new values. - - The instance config's new values are readable via the API. - - The instance config's + - The new values of the instance configuration are readable via + the API. + - The instance configuration's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field becomes false. The returned [long-running operation][google.longrunning.Operation] will have a name of the format ``/operations/`` and - can be used to track the instance config modification. The - [metadata][google.longrunning.Operation.metadata] field type is + can be used to track the instance configuration modification. + The [metadata][google.longrunning.Operation.metadata] field type + is [UpdateInstanceConfigMetadata][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata]. 
The [response][google.longrunning.Operation.response] field type is @@ -490,11 +492,11 @@ def delete_instance_config( ]: r"""Return a callable for the delete instance config method over gRPC. - Deletes the instance config. Deletion is only allowed when no - instances are using the configuration. If any instances are - using the config, returns ``FAILED_PRECONDITION``. + Deletes the instance configuration. Deletion is only allowed + when no instances are using the configuration. If any instances + are using the configuration, returns ``FAILED_PRECONDITION``. - Only user managed configurations can be deleted. + Only user-managed configurations can be deleted. Authorization requires ``spanner.instanceConfigs.delete`` permission on the resource @@ -528,9 +530,9 @@ def list_instance_config_operations( r"""Return a callable for the list instance config operations method over gRPC. - Lists the user-managed instance config [long-running + Lists the user-managed instance configuration [long-running operations][google.longrunning.Operation] in the given project. - An instance config operation has a name of the form + An instance configuration operation has a name of the form ``projects//instanceConfigs//operations/``. The long-running operation [metadata][google.longrunning.Operation.metadata] field type @@ -1174,6 +1176,99 @@ def list_instance_partition_operations( ) return self._stubs["list_instance_partition_operations"] + @property + def move_instance( + self, + ) -> Callable[ + [spanner_instance_admin.MoveInstanceRequest], operations_pb2.Operation + ]: + r"""Return a callable for the move instance method over gRPC. + + Moves an instance to the target instance configuration. You can + use the returned [long-running + operation][google.longrunning.Operation] to track the progress + of moving the instance. + + ``MoveInstance`` returns ``FAILED_PRECONDITION`` if the instance + meets any of the following criteria: + + - Is undergoing a move to a different instance configuration + - Has backups + - Has an ongoing update + - Contains any CMEK-enabled databases + - Is a free trial instance + + While the operation is pending: + + - All other attempts to modify the instance, including changes + to its compute capacity, are rejected. + + - The following database and backup admin operations are + rejected: + + - ``DatabaseAdmin.CreateDatabase`` + - ``DatabaseAdmin.UpdateDatabaseDdl`` (disabled if + default_leader is specified in the request.) + - ``DatabaseAdmin.RestoreDatabase`` + - ``DatabaseAdmin.CreateBackup`` + - ``DatabaseAdmin.CopyBackup`` + + - Both the source and target instance configurations are + subject to hourly compute and storage charges. + + - The instance might experience higher read-write latencies and + a higher transaction abort rate. However, moving an instance + doesn't cause any downtime. + + The returned [long-running + operation][google.longrunning.Operation] has a name of the + format ``/operations/`` and can be + used to track the move instance operation. The + [metadata][google.longrunning.Operation.metadata] field type is + [MoveInstanceMetadata][google.spanner.admin.instance.v1.MoveInstanceMetadata]. + The [response][google.longrunning.Operation.response] field type + is [Instance][google.spanner.admin.instance.v1.Instance], if + successful. Cancelling the operation sets its metadata's + [cancel_time][google.spanner.admin.instance.v1.MoveInstanceMetadata.cancel_time]. 
+ Cancellation is not immediate because it involves moving any + data previously moved to the target instance configuration back + to the original instance configuration. You can use this + operation to track the progress of the cancellation. Upon + successful completion of the cancellation, the operation + terminates with ``CANCELLED`` status. + + If not cancelled, upon completion of the returned operation: + + - The instance successfully moves to the target instance + configuration. + - You are billed for compute and storage in target instance + configuration. + + Authorization requires the ``spanner.instances.update`` + permission on the resource + [instance][google.spanner.admin.instance.v1.Instance]. + + For more details, see `Move an + instance `__. + + Returns: + Callable[[~.MoveInstanceRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "move_instance" not in self._stubs: + self._stubs["move_instance"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.instance.v1.InstanceAdmin/MoveInstance", + request_serializer=spanner_instance_admin.MoveInstanceRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["move_instance"] + def close(self): self.grpc_channel.close() diff --git a/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc_asyncio.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc_asyncio.py index b21d57f4fa..ef480a6805 100644 --- a/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc_asyncio.py +++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc_asyncio.py @@ -352,39 +352,39 @@ def create_instance_config( ]: r"""Return a callable for the create instance config method over gRPC. - Creates an instance config and begins preparing it to be used. - The returned [long-running + Creates an instance configuration and begins preparing it to be + used. The returned [long-running operation][google.longrunning.Operation] can be used to track - the progress of preparing the new instance config. The instance - config name is assigned by the caller. If the named instance - config already exists, ``CreateInstanceConfig`` returns - ``ALREADY_EXISTS``. + the progress of preparing the new instance configuration. The + instance configuration name is assigned by the caller. If the + named instance configuration already exists, + ``CreateInstanceConfig`` returns ``ALREADY_EXISTS``. Immediately after the request returns: - - The instance config is readable via the API, with all - requested attributes. The instance config's + - The instance configuration is readable via the API, with all + requested attributes. The instance configuration's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field is set to true. Its state is ``CREATING``. While the operation is pending: - - Cancelling the operation renders the instance config + - Cancelling the operation renders the instance configuration immediately unreadable via the API. - Except for deleting the creating resource, all other attempts - to modify the instance config are rejected. + to modify the instance configuration are rejected. 
Upon completion of the returned operation: - Instances can be created using the instance configuration. - - The instance config's + - The instance configuration's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field becomes false. Its state becomes ``READY``. The returned [long-running operation][google.longrunning.Operation] will have a name of the format ``/operations/`` and - can be used to track creation of the instance config. The + can be used to track creation of the instance configuration. The [metadata][google.longrunning.Operation.metadata] field type is [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]. The [response][google.longrunning.Operation.response] field type @@ -423,16 +423,16 @@ def update_instance_config( ]: r"""Return a callable for the update instance config method over gRPC. - Updates an instance config. The returned [long-running + Updates an instance configuration. The returned [long-running operation][google.longrunning.Operation] can be used to track the progress of updating the instance. If the named instance - config does not exist, returns ``NOT_FOUND``. + configuration does not exist, returns ``NOT_FOUND``. - Only user managed configurations can be updated. + Only user-managed configurations can be updated. Immediately after the request returns: - - The instance config's + - The instance configuration's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field is set to true. @@ -443,25 +443,27 @@ def update_instance_config( The operation is guaranteed to succeed at undoing all changes, after which point it terminates with a ``CANCELLED`` status. - - All other attempts to modify the instance config are + - All other attempts to modify the instance configuration are rejected. - - Reading the instance config via the API continues to give the - pre-request values. + - Reading the instance configuration via the API continues to + give the pre-request values. Upon completion of the returned operation: - Creating instances using the instance configuration uses the new values. - - The instance config's new values are readable via the API. - - The instance config's + - The new values of the instance configuration are readable via + the API. + - The instance configuration's [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field becomes false. The returned [long-running operation][google.longrunning.Operation] will have a name of the format ``/operations/`` and - can be used to track the instance config modification. The - [metadata][google.longrunning.Operation.metadata] field type is + can be used to track the instance configuration modification. + The [metadata][google.longrunning.Operation.metadata] field type + is [UpdateInstanceConfigMetadata][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata]. The [response][google.longrunning.Operation.response] field type is @@ -498,11 +500,11 @@ def delete_instance_config( ]: r"""Return a callable for the delete instance config method over gRPC. - Deletes the instance config. Deletion is only allowed when no - instances are using the configuration. If any instances are - using the config, returns ``FAILED_PRECONDITION``. + Deletes the instance configuration. Deletion is only allowed + when no instances are using the configuration. If any instances + are using the configuration, returns ``FAILED_PRECONDITION``. - Only user managed configurations can be deleted. 
+ Only user-managed configurations can be deleted. Authorization requires ``spanner.instanceConfigs.delete`` permission on the resource @@ -536,9 +538,9 @@ def list_instance_config_operations( r"""Return a callable for the list instance config operations method over gRPC. - Lists the user-managed instance config [long-running + Lists the user-managed instance configuration [long-running operations][google.longrunning.Operation] in the given project. - An instance config operation has a name of the form + An instance configuration operation has a name of the form ``projects//instanceConfigs//operations/``. The long-running operation [metadata][google.longrunning.Operation.metadata] field type @@ -1188,6 +1190,100 @@ def list_instance_partition_operations( ) return self._stubs["list_instance_partition_operations"] + @property + def move_instance( + self, + ) -> Callable[ + [spanner_instance_admin.MoveInstanceRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the move instance method over gRPC. + + Moves an instance to the target instance configuration. You can + use the returned [long-running + operation][google.longrunning.Operation] to track the progress + of moving the instance. + + ``MoveInstance`` returns ``FAILED_PRECONDITION`` if the instance + meets any of the following criteria: + + - Is undergoing a move to a different instance configuration + - Has backups + - Has an ongoing update + - Contains any CMEK-enabled databases + - Is a free trial instance + + While the operation is pending: + + - All other attempts to modify the instance, including changes + to its compute capacity, are rejected. + + - The following database and backup admin operations are + rejected: + + - ``DatabaseAdmin.CreateDatabase`` + - ``DatabaseAdmin.UpdateDatabaseDdl`` (disabled if + default_leader is specified in the request.) + - ``DatabaseAdmin.RestoreDatabase`` + - ``DatabaseAdmin.CreateBackup`` + - ``DatabaseAdmin.CopyBackup`` + + - Both the source and target instance configurations are + subject to hourly compute and storage charges. + + - The instance might experience higher read-write latencies and + a higher transaction abort rate. However, moving an instance + doesn't cause any downtime. + + The returned [long-running + operation][google.longrunning.Operation] has a name of the + format ``/operations/`` and can be + used to track the move instance operation. The + [metadata][google.longrunning.Operation.metadata] field type is + [MoveInstanceMetadata][google.spanner.admin.instance.v1.MoveInstanceMetadata]. + The [response][google.longrunning.Operation.response] field type + is [Instance][google.spanner.admin.instance.v1.Instance], if + successful. Cancelling the operation sets its metadata's + [cancel_time][google.spanner.admin.instance.v1.MoveInstanceMetadata.cancel_time]. + Cancellation is not immediate because it involves moving any + data previously moved to the target instance configuration back + to the original instance configuration. You can use this + operation to track the progress of the cancellation. Upon + successful completion of the cancellation, the operation + terminates with ``CANCELLED`` status. + + If not cancelled, upon completion of the returned operation: + + - The instance successfully moves to the target instance + configuration. + - You are billed for compute and storage in target instance + configuration. 
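# Illustrative sketch (not part of the generated sources in this diff): MoveInstance
# returns FAILED_PRECONDITION for instances that, for example, have backups, an
# in-flight update, CMEK-enabled databases, or are free trial instances, so callers
# may want to handle that case explicitly. Resource names are placeholders.
from google.api_core import exceptions
from google.cloud import spanner_admin_instance_v1


def try_move_instance(name: str, target_config: str):
    client = spanner_admin_instance_v1.InstanceAdminClient()
    try:
        return client.move_instance(
            request=spanner_admin_instance_v1.MoveInstanceRequest(
                name=name, target_config=target_config
            )
        )
    except exceptions.FailedPrecondition as exc:
        print(f"Instance cannot be moved right now: {exc}")
        return None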
+ + Authorization requires the ``spanner.instances.update`` + permission on the resource + [instance][google.spanner.admin.instance.v1.Instance]. + + For more details, see `Move an + instance `__. + + Returns: + Callable[[~.MoveInstanceRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "move_instance" not in self._stubs: + self._stubs["move_instance"] = self.grpc_channel.unary_unary( + "/google.spanner.admin.instance.v1.InstanceAdmin/MoveInstance", + request_serializer=spanner_instance_admin.MoveInstanceRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["move_instance"] + def _prep_wrapped_messages(self, client_info): """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" self._wrapped_methods = { @@ -1351,6 +1447,11 @@ def _prep_wrapped_messages(self, client_info): default_timeout=None, client_info=client_info, ), + self.move_instance: gapic_v1.method_async.wrap_method( + self.move_instance, + default_timeout=None, + client_info=client_info, + ), } def close(self): diff --git a/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/rest.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/rest.py index ed152b4220..1a74f0e7f9 100644 --- a/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/rest.py +++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/rest.py @@ -182,6 +182,14 @@ def post_list_instances(self, response): logging.log(f"Received response: {response}") return response + def pre_move_instance(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_move_instance(self, response): + logging.log(f"Received response: {response}") + return response + def pre_set_iam_policy(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -560,6 +568,29 @@ def post_list_instances( """ return response + def pre_move_instance( + self, + request: spanner_instance_admin.MoveInstanceRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[spanner_instance_admin.MoveInstanceRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for move_instance + + Override in a subclass to manipulate the request or metadata + before they are sent to the InstanceAdmin server. + """ + return request, metadata + + def post_move_instance( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for move_instance + + Override in a subclass to manipulate the response + after it is returned by the InstanceAdmin server but before + it is returned to user code. 
+ """ + return response + def pre_set_iam_policy( self, request: iam_policy_pb2.SetIamPolicyRequest, @@ -2285,6 +2316,100 @@ def __call__( resp = self._interceptor.post_list_instances(resp) return resp + class _MoveInstance(InstanceAdminRestStub): + def __hash__(self): + return hash("MoveInstance") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: spanner_instance_admin.MoveInstanceRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the move instance method over HTTP. + + Args: + request (~.spanner_instance_admin.MoveInstanceRequest): + The request object. The request for + [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{name=projects/*/instances/*}:move", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_move_instance(request, metadata) + pb_request = spanner_instance_admin.MoveInstanceRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_move_instance(resp) + return resp + class _SetIamPolicy(InstanceAdminRestStub): def __hash__(self): return hash("SetIamPolicy") @@ -2988,6 +3113,16 @@ def list_instances( # In C++ this would require a dynamic_cast return self._ListInstances(self._session, self._host, self._interceptor) # type: ignore + @property + def move_instance( + self, + ) -> Callable[ + [spanner_instance_admin.MoveInstanceRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._MoveInstance(self._session, self._host, self._interceptor) # type: ignore + @property def set_iam_policy( self, diff --git a/google/cloud/spanner_admin_instance_v1/types/__init__.py b/google/cloud/spanner_admin_instance_v1/types/__init__.py index a3d1028ce9..1b9cd38032 100644 --- a/google/cloud/spanner_admin_instance_v1/types/__init__.py +++ b/google/cloud/spanner_admin_instance_v1/types/__init__.py @@ -44,6 +44,9 @@ ListInstancePartitionsResponse, ListInstancesRequest, ListInstancesResponse, + MoveInstanceMetadata, + MoveInstanceRequest, + MoveInstanceResponse, ReplicaInfo, UpdateInstanceConfigMetadata, UpdateInstanceConfigRequest, @@ -82,6 +85,9 @@ "ListInstancePartitionsResponse", "ListInstancesRequest", "ListInstancesResponse", + "MoveInstanceMetadata", + "MoveInstanceRequest", + "MoveInstanceResponse", "ReplicaInfo", "UpdateInstanceConfigMetadata", "UpdateInstanceConfigRequest", diff --git a/google/cloud/spanner_admin_instance_v1/types/spanner_instance_admin.py b/google/cloud/spanner_admin_instance_v1/types/spanner_instance_admin.py index 171bf48618..d2bb2d395b 100644 --- a/google/cloud/spanner_admin_instance_v1/types/spanner_instance_admin.py +++ b/google/cloud/spanner_admin_instance_v1/types/spanner_instance_admin.py @@ -61,6 +61,9 @@ "ListInstancePartitionsResponse", "ListInstancePartitionOperationsRequest", "ListInstancePartitionOperationsResponse", + "MoveInstanceRequest", + "MoveInstanceResponse", + "MoveInstanceMetadata", }, ) @@ -147,12 +150,15 @@ class InstanceConfig(proto.Message): A unique identifier for the instance configuration. Values are of the form ``projects//instanceConfigs/[a-z][-a-z0-9]*``. + + User instance configuration must start with ``custom-``. display_name (str): The name of this instance configuration as it appears in UIs. config_type (google.cloud.spanner_admin_instance_v1.types.InstanceConfig.Type): - Output only. Whether this instance config is - a Google or User Managed Configuration. + Output only. Whether this instance + configuration is a Google-managed or + user-managed configuration. replicas (MutableSequence[google.cloud.spanner_admin_instance_v1.types.ReplicaInfo]): The geographic placement of nodes in this instance configuration and their replication @@ -201,30 +207,31 @@ class InstanceConfig(proto.Message): etag (str): etag is used for optimistic concurrency control as a way to help prevent simultaneous - updates of a instance config from overwriting - each other. It is strongly suggested that - systems make use of the etag in the + updates of a instance configuration from + overwriting each other. It is strongly suggested + that systems make use of the etag in the read-modify-write cycle to perform instance - config updates in order to avoid race + configuration updates in order to avoid race conditions: An etag is returned in the response - which contains instance configs, and systems are - expected to put that etag in the request to - update instance config to ensure that their - change will be applied to the same version of - the instance config. - If no etag is provided in the call to update - instance config, then the existing instance - config is overwritten blindly. + which contains instance configurations, and + systems are expected to put that etag in the + request to update instance configuration to + ensure that their change is applied to the same + version of the instance configuration. 
If no + etag is provided in the call to update the + instance configuration, then the existing + instance configuration is overwritten blindly. leader_options (MutableSequence[str]): Allowed values of the "default_leader" schema option for databases in instances that use this instance configuration. reconciling (bool): - Output only. If true, the instance config is - being created or updated. If false, there are no - ongoing operations for the instance config. + Output only. If true, the instance + configuration is being created or updated. If + false, there are no ongoing operations for the + instance configuration. state (google.cloud.spanner_admin_instance_v1.types.InstanceConfig.State): - Output only. The current instance config - state. + Output only. The current instance configuration state. + Applicable only for ``USER_MANAGED`` configurations. """ class Type(proto.Enum): @@ -243,16 +250,17 @@ class Type(proto.Enum): USER_MANAGED = 2 class State(proto.Enum): - r"""Indicates the current state of the instance config. + r"""Indicates the current state of the instance configuration. Values: STATE_UNSPECIFIED (0): Not specified. CREATING (1): - The instance config is still being created. + The instance configuration is still being + created. READY (2): - The instance config is fully created and - ready to be used to create instances. + The instance configuration is fully created + and ready to be used to create instances. """ STATE_UNSPECIFIED = 0 CREATING = 1 @@ -310,7 +318,7 @@ class State(proto.Enum): class AutoscalingConfig(proto.Message): - r"""Autoscaling config for an instance. + r"""Autoscaling configuration for an instance. Attributes: autoscaling_limits (google.cloud.spanner_admin_instance_v1.types.AutoscalingConfig.AutoscalingLimits): @@ -521,6 +529,8 @@ class Instance(proto.Message): update_time (google.protobuf.timestamp_pb2.Timestamp): Output only. The time at which the instance was most recently updated. + edition (google.cloud.spanner_admin_instance_v1.types.Instance.Edition): + Optional. The ``Edition`` of the current instance. """ class State(proto.Enum): @@ -542,6 +552,25 @@ class State(proto.Enum): CREATING = 1 READY = 2 + class Edition(proto.Enum): + r"""The edition selected for this instance. Different editions + provide different capabilities at different price points. + + Values: + EDITION_UNSPECIFIED (0): + Edition not specified. + STANDARD (1): + Standard edition. + ENTERPRISE (2): + Enterprise edition. + ENTERPRISE_PLUS (3): + Enterprise Plus edition. + """ + EDITION_UNSPECIFIED = 0 + STANDARD = 1 + ENTERPRISE = 2 + ENTERPRISE_PLUS = 3 + name: str = proto.Field( proto.STRING, number=1, @@ -591,6 +620,11 @@ class State(proto.Enum): number=12, message=timestamp_pb2.Timestamp, ) + edition: Edition = proto.Field( + proto.ENUM, + number=20, + enum=Edition, + ) class ListInstanceConfigsRequest(proto.Message): @@ -680,14 +714,14 @@ class CreateInstanceConfigRequest(proto.Message): Attributes: parent (str): Required. The name of the project in which to create the - instance config. Values are of the form + instance configuration. Values are of the form ``projects/``. instance_config_id (str): - Required. The ID of the instance config to create. Valid - identifiers are of the form ``custom-[-a-z0-9]*[a-z0-9]`` - and must be between 2 and 64 characters in length. The - ``custom-`` prefix is required to avoid name conflicts with - Google managed configurations. + Required. The ID of the instance configuration to create. 
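# Illustrative sketch (not part of the generated sources): requesting a specific
# edition for a new instance. Resource names and node_count are placeholders.
from google.cloud import spanner_admin_instance_v1


def create_enterprise_instance(project_id: str, instance_id: str):
    client = spanner_admin_instance_v1.InstanceAdminClient()
    instance = spanner_admin_instance_v1.Instance(
        name=f"projects/{project_id}/instances/{instance_id}",
        config=f"projects/{project_id}/instanceConfigs/regional-us-central1",
        display_name="Enterprise edition instance",
        node_count=1,
        edition=spanner_admin_instance_v1.Instance.Edition.ENTERPRISE,
    )
    operation = client.create_instance(
        parent=f"projects/{project_id}",
        instance_id=instance_id,
        instance=instance,
    )
    return operation.result()  # blocks until the instance reaches READY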
+ Valid identifiers are of the form + ``custom-[-a-z0-9]*[a-z0-9]`` and must be between 2 and 64 + characters in length. The ``custom-`` prefix is required to + avoid name conflicts with Google-managed configurations. instance_config (google.cloud.spanner_admin_instance_v1.types.InstanceConfig): Required. The InstanceConfig proto of the configuration to create. instance_config.name must be @@ -726,9 +760,9 @@ class UpdateInstanceConfigRequest(proto.Message): Attributes: instance_config (google.cloud.spanner_admin_instance_v1.types.InstanceConfig): - Required. The user instance config to update, which must - always include the instance config name. Otherwise, only - fields mentioned in + Required. The user instance configuration to update, which + must always include the instance configuration name. + Otherwise, only fields mentioned in [update_mask][google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.update_mask] need be included. To prevent conflicts of concurrent updates, @@ -776,13 +810,14 @@ class DeleteInstanceConfigRequest(proto.Message): etag (str): Used for optimistic concurrency control as a way to help prevent simultaneous deletes of an - instance config from overwriting each other. If - not empty, the API - only deletes the instance config when the etag - provided matches the current status of the - requested instance config. Otherwise, deletes - the instance config without checking the current - status of the requested instance config. + instance configuration from overwriting each + other. If not empty, the API + only deletes the instance configuration when the + etag provided matches the current status of the + requested instance configuration. Otherwise, + deletes the instance configuration without + checking the current status of the requested + instance configuration. validate_only (bool): An option to validate, but not actually execute, a request, and provide the same @@ -809,8 +844,8 @@ class ListInstanceConfigOperationsRequest(proto.Message): Attributes: parent (str): - Required. The project of the instance config operations. - Values are of the form ``projects/``. + Required. The project of the instance configuration + operations. Values are of the form ``projects/``. filter (str): An expression that filters the list of returned operations. @@ -857,7 +892,8 @@ class ListInstanceConfigOperationsRequest(proto.Message): - The operation's metadata type is [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]. - - The instance config name contains "custom-config". + - The instance configuration name contains + "custom-config". - The operation started before 2021-03-28T14:50:00Z. - The operation resulted in an error. page_size (int): @@ -896,10 +932,10 @@ class ListInstanceConfigOperationsResponse(proto.Message): Attributes: operations (MutableSequence[google.longrunning.operations_pb2.Operation]): - The list of matching instance config [long-running + The list of matching instance configuration [long-running operations][google.longrunning.Operation]. Each operation's - name will be prefixed by the instance config's name. The - operation's + name will be prefixed by the name of the instance + configuration. The operation's [metadata][google.longrunning.Operation.metadata] field type ``metadata.type_url`` describes the type of the metadata. 
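# Illustrative sketch (not part of the generated sources): using the etag returned
# with an instance configuration to guard its deletion, and validate_only to
# dry-run the request first. The configuration name is a placeholder.
from google.cloud import spanner_admin_instance_v1


def delete_custom_config(name: str = "projects/my-project/instanceConfigs/custom-eu"):
    client = spanner_admin_instance_v1.InstanceAdminClient()
    config = client.get_instance_config(name=name)

    # Dry run: the request is validated but nothing is deleted.
    client.delete_instance_config(
        request=spanner_admin_instance_v1.DeleteInstanceConfigRequest(
            name=config.name, etag=config.etag, validate_only=True
        )
    )

    # Actual delete: only proceeds if the configuration still matches the etag read above.
    client.delete_instance_config(
        request=spanner_admin_instance_v1.DeleteInstanceConfigRequest(
            name=config.name, etag=config.etag
        )
    )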
next_page_token (str): @@ -1247,7 +1283,7 @@ class CreateInstanceConfigMetadata(proto.Message): Attributes: instance_config (google.cloud.spanner_admin_instance_v1.types.InstanceConfig): - The target instance config end state. + The target instance configuration end state. progress (google.cloud.spanner_admin_instance_v1.types.OperationProgress): The progress of the [CreateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstanceConfig] @@ -1280,7 +1316,8 @@ class UpdateInstanceConfigMetadata(proto.Message): Attributes: instance_config (google.cloud.spanner_admin_instance_v1.types.InstanceConfig): - The desired instance config after updating. + The desired instance configuration after + updating. progress (google.cloud.spanner_admin_instance_v1.types.OperationProgress): The progress of the [UpdateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstanceConfig] @@ -1898,4 +1935,71 @@ def raw_page(self): ) +class MoveInstanceRequest(proto.Message): + r"""The request for + [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]. + + Attributes: + name (str): + Required. The instance to move. Values are of the form + ``projects//instances/``. + target_config (str): + Required. The target instance configuration where to move + the instance. Values are of the form + ``projects//instanceConfigs/``. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + target_config: str = proto.Field( + proto.STRING, + number=2, + ) + + +class MoveInstanceResponse(proto.Message): + r"""The response for + [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]. + + """ + + +class MoveInstanceMetadata(proto.Message): + r"""Metadata type for the operation returned by + [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]. + + Attributes: + target_config (str): + The target instance configuration where to move the + instance. Values are of the form + ``projects//instanceConfigs/``. + progress (google.cloud.spanner_admin_instance_v1.types.OperationProgress): + The progress of the + [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance] + operation. + [progress_percent][google.spanner.admin.instance.v1.OperationProgress.progress_percent] + is reset when cancellation is requested. + cancel_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which this operation was + cancelled. + """ + + target_config: str = proto.Field( + proto.STRING, + number=1, + ) + progress: common.OperationProgress = proto.Field( + proto.MESSAGE, + number=2, + message=common.OperationProgress, + ) + cancel_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/spanner_v1/services/spanner/async_client.py b/google/cloud/spanner_v1/services/spanner/async_client.py index e1c6271710..992a74503c 100644 --- a/google/cloud/spanner_v1/services/spanner/async_client.py +++ b/google/cloud/spanner_v1/services/spanner/async_client.py @@ -14,7 +14,6 @@ # limitations under the License. 
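# Illustrative sketch (not part of the generated sources): reading MoveInstanceMetadata
# from the long-running operation to report progress instead of blocking on result().
import time


def wait_for_move_with_progress(operation, poll_seconds: float = 30.0):
    # `operation` is the google.api_core.operation.Operation returned by move_instance().
    while not operation.done():
        metadata = operation.metadata  # a MoveInstanceMetadata message
        print(
            f"Moving to {metadata.target_config}: "
            f"{metadata.progress.progress_percent}% complete"
        )
        time.sleep(poll_seconds)
    return operation.result()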
# from collections import OrderedDict -import functools import re from typing import ( Dict, @@ -194,9 +193,7 @@ def universe_domain(self) -> str: """ return self._client._universe_domain - get_transport_class = functools.partial( - type(SpannerClient).get_transport_class, type(SpannerClient) - ) + get_transport_class = SpannerClient.get_transport_class def __init__( self, diff --git a/google/cloud/spanner_v1/services/spanner/client.py b/google/cloud/spanner_v1/services/spanner/client.py index 7a07fe86c1..96b90bb21c 100644 --- a/google/cloud/spanner_v1/services/spanner/client.py +++ b/google/cloud/spanner_v1/services/spanner/client.py @@ -690,7 +690,7 @@ def __init__( transport_init: Union[ Type[SpannerTransport], Callable[..., SpannerTransport] ] = ( - type(self).get_transport_class(transport) + SpannerClient.get_transport_class(transport) if isinstance(transport, str) or transport is None else cast(Callable[..., SpannerTransport], transport) ) diff --git a/samples/generated_samples/snippet_metadata_google.spanner.admin.database.v1.json b/samples/generated_samples/snippet_metadata_google.spanner.admin.database.v1.json index 1eab73422e..86a6b4fa78 100644 --- a/samples/generated_samples/snippet_metadata_google.spanner.admin.database.v1.json +++ b/samples/generated_samples/snippet_metadata_google.spanner.admin.database.v1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-spanner-admin-database", - "version": "3.48.0" + "version": "0.1.0" }, "snippets": [ { diff --git a/samples/generated_samples/snippet_metadata_google.spanner.admin.instance.v1.json b/samples/generated_samples/snippet_metadata_google.spanner.admin.instance.v1.json index 1ae7294c61..ac2f8c24ec 100644 --- a/samples/generated_samples/snippet_metadata_google.spanner.admin.instance.v1.json +++ b/samples/generated_samples/snippet_metadata_google.spanner.admin.instance.v1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-spanner-admin-instance", - "version": "3.48.0" + "version": "0.1.0" }, "snippets": [ { @@ -2456,6 +2456,159 @@ ], "title": "spanner_v1_generated_instance_admin_list_instances_sync.py" }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.spanner_admin_instance_v1.InstanceAdminAsyncClient", + "shortName": "InstanceAdminAsyncClient" + }, + "fullName": "google.cloud.spanner_admin_instance_v1.InstanceAdminAsyncClient.move_instance", + "method": { + "fullName": "google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance", + "service": { + "fullName": "google.spanner.admin.instance.v1.InstanceAdmin", + "shortName": "InstanceAdmin" + }, + "shortName": "MoveInstance" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.spanner_admin_instance_v1.types.MoveInstanceRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "move_instance" + }, + "description": "Sample for MoveInstance", + "file": "spanner_v1_generated_instance_admin_move_instance_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "spanner_v1_generated_InstanceAdmin_MoveInstance_async", + "segments": [ + { + "end": 56, + "start": 27, + "type": "FULL" + }, + { + "end": 56, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 
41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 53, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 57, + "start": 54, + "type": "RESPONSE_HANDLING" + } + ], + "title": "spanner_v1_generated_instance_admin_move_instance_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.spanner_admin_instance_v1.InstanceAdminClient", + "shortName": "InstanceAdminClient" + }, + "fullName": "google.cloud.spanner_admin_instance_v1.InstanceAdminClient.move_instance", + "method": { + "fullName": "google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance", + "service": { + "fullName": "google.spanner.admin.instance.v1.InstanceAdmin", + "shortName": "InstanceAdmin" + }, + "shortName": "MoveInstance" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.spanner_admin_instance_v1.types.MoveInstanceRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "move_instance" + }, + "description": "Sample for MoveInstance", + "file": "spanner_v1_generated_instance_admin_move_instance_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "spanner_v1_generated_InstanceAdmin_MoveInstance_sync", + "segments": [ + { + "end": 56, + "start": 27, + "type": "FULL" + }, + { + "end": 56, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 53, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 57, + "start": 54, + "type": "RESPONSE_HANDLING" + } + ], + "title": "spanner_v1_generated_instance_admin_move_instance_sync.py" + }, { "canonical": true, "clientMethod": { diff --git a/samples/generated_samples/snippet_metadata_google.spanner.v1.json b/samples/generated_samples/snippet_metadata_google.spanner.v1.json index 70e86962ed..4384d19e2a 100644 --- a/samples/generated_samples/snippet_metadata_google.spanner.v1.json +++ b/samples/generated_samples/snippet_metadata_google.spanner.v1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-spanner", - "version": "3.48.0" + "version": "0.1.0" }, "snippets": [ { diff --git a/samples/generated_samples/spanner_v1_generated_instance_admin_move_instance_async.py b/samples/generated_samples/spanner_v1_generated_instance_admin_move_instance_async.py new file mode 100644 index 0000000000..6530706620 --- /dev/null +++ b/samples/generated_samples/spanner_v1_generated_instance_admin_move_instance_async.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for MoveInstance +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-spanner-admin-instance + + +# [START spanner_v1_generated_InstanceAdmin_MoveInstance_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import spanner_admin_instance_v1 + + +async def sample_move_instance(): + # Create a client + client = spanner_admin_instance_v1.InstanceAdminAsyncClient() + + # Initialize request argument(s) + request = spanner_admin_instance_v1.MoveInstanceRequest( + name="name_value", + target_config="target_config_value", + ) + + # Make the request + operation = client.move_instance(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END spanner_v1_generated_InstanceAdmin_MoveInstance_async] diff --git a/samples/generated_samples/spanner_v1_generated_instance_admin_move_instance_sync.py b/samples/generated_samples/spanner_v1_generated_instance_admin_move_instance_sync.py new file mode 100644 index 0000000000..32d1c4f5b1 --- /dev/null +++ b/samples/generated_samples/spanner_v1_generated_instance_admin_move_instance_sync.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for MoveInstance +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-spanner-admin-instance + + +# [START spanner_v1_generated_InstanceAdmin_MoveInstance_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import spanner_admin_instance_v1 + + +def sample_move_instance(): + # Create a client + client = spanner_admin_instance_v1.InstanceAdminClient() + + # Initialize request argument(s) + request = spanner_admin_instance_v1.MoveInstanceRequest( + name="name_value", + target_config="target_config_value", + ) + + # Make the request + operation = client.move_instance(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END spanner_v1_generated_InstanceAdmin_MoveInstance_sync] diff --git a/scripts/fixup_spanner_admin_instance_v1_keywords.py b/scripts/fixup_spanner_admin_instance_v1_keywords.py index 321014ad94..3b5fa8afb6 100644 --- a/scripts/fixup_spanner_admin_instance_v1_keywords.py +++ b/scripts/fixup_spanner_admin_instance_v1_keywords.py @@ -54,6 +54,7 @@ class spanner_admin_instanceCallTransformer(cst.CSTTransformer): 'list_instance_partition_operations': ('parent', 'filter', 'page_size', 'page_token', 'instance_partition_deadline', ), 'list_instance_partitions': ('parent', 'page_size', 'page_token', 'instance_partition_deadline', ), 'list_instances': ('parent', 'page_size', 'page_token', 'filter', 'instance_deadline', ), + 'move_instance': ('name', 'target_config', ), 'set_iam_policy': ('resource', 'policy', 'update_mask', ), 'test_iam_permissions': ('resource', 'permissions', ), 'update_instance': ('instance', 'field_mask', ), diff --git a/tests/unit/gapic/spanner_admin_database_v1/test_database_admin.py b/tests/unit/gapic/spanner_admin_database_v1/test_database_admin.py index ce196a15f8..bdec708615 100644 --- a/tests/unit/gapic/spanner_admin_database_v1/test_database_admin.py +++ b/tests/unit/gapic/spanner_admin_database_v1/test_database_admin.py @@ -1312,22 +1312,23 @@ async def test_list_databases_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.list_databases - ] = mock_object + ] = mock_rpc request = {} await client.list_databases(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.list_databases(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -1817,8 +1818,9 @@ def test_create_database_use_cached_wrapped_rpc(): # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. 
+ # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() client.create_database(request) @@ -1872,26 +1874,28 @@ async def test_create_database_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.create_database - ] = mock_object + ] = mock_rpc request = {} await client.create_database(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() await client.create_database(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -2266,22 +2270,23 @@ async def test_get_database_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.get_database - ] = mock_object + ] = mock_rpc request = {} await client.get_database(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.get_database(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -2583,8 +2588,9 @@ def test_update_database_use_cached_wrapped_rpc(): # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() client.update_database(request) @@ -2638,26 +2644,28 @@ async def test_update_database_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.update_database - ] = mock_object + ] = mock_rpc request = {} await client.update_database(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. 
+ # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() await client.update_database(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -2970,8 +2978,9 @@ def test_update_database_ddl_use_cached_wrapped_rpc(): # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() client.update_database_ddl(request) @@ -3027,26 +3036,28 @@ async def test_update_database_ddl_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.update_database_ddl - ] = mock_object + ] = mock_rpc request = {} await client.update_database_ddl(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() await client.update_database_ddl(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -3406,22 +3417,23 @@ async def test_drop_database_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.drop_database - ] = mock_object + ] = mock_rpc request = {} await client.drop_database(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.drop_database(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -3767,22 +3779,23 @@ async def test_get_database_ddl_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.get_database_ddl - ] = mock_object + ] = mock_rpc request = {} await client.get_database_ddl(request) # Establish that the underlying gRPC stub method was called. 
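# Aside (not part of the test diff): the tests above replace the cached wrapped RPC
# with an AsyncMock whose return_value is pinned to a plain Mock. Awaiting an
# AsyncMock call yields its return_value, so this keeps the awaited result
# synchronous for any follow-up attribute access. A minimal stdlib-only check:
import asyncio
from unittest import mock


async def _demo_async_mock_return_value():
    mock_rpc = mock.AsyncMock()
    mock_rpc.return_value = mock.Mock()
    result = await mock_rpc(request={})
    assert mock_rpc.call_count == 1
    assert isinstance(result, mock.Mock) and not isinstance(result, mock.AsyncMock)


asyncio.run(_demo_async_mock_return_value())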
- assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.get_database_ddl(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -4137,22 +4150,23 @@ async def test_set_iam_policy_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.set_iam_policy - ] = mock_object + ] = mock_rpc request = {} await client.set_iam_policy(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.set_iam_policy(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -4520,22 +4534,23 @@ async def test_get_iam_policy_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.get_iam_policy - ] = mock_object + ] = mock_rpc request = {} await client.get_iam_policy(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.get_iam_policy(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -4911,22 +4926,23 @@ async def test_test_iam_permissions_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.test_iam_permissions - ] = mock_object + ] = mock_rpc request = {} await client.test_iam_permissions(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.test_iam_permissions(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -5261,8 +5277,9 @@ def test_create_backup_use_cached_wrapped_rpc(): # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() client.create_backup(request) @@ -5316,26 +5333,28 @@ async def test_create_backup_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.create_backup - ] = mock_object + ] = mock_rpc request = {} await client.create_backup(request) # Establish that the underlying gRPC stub method was called. 
- assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() await client.create_backup(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -5649,8 +5668,9 @@ def test_copy_backup_use_cached_wrapped_rpc(): # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() client.copy_backup(request) @@ -5704,26 +5724,28 @@ async def test_copy_backup_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.copy_backup - ] = mock_object + ] = mock_rpc request = {} await client.copy_backup(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. 
+ # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() await client.copy_backup(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -5959,11 +5981,14 @@ def test_get_backup(request_type, transport: str = "grpc"): database="database_value", name="name_value", size_bytes=1089, + freeable_size_bytes=2006, + exclusive_size_bytes=2168, state=backup.Backup.State.CREATING, referencing_databases=["referencing_databases_value"], database_dialect=common.DatabaseDialect.GOOGLE_STANDARD_SQL, referencing_backups=["referencing_backups_value"], backup_schedules=["backup_schedules_value"], + incremental_backup_chain_id="incremental_backup_chain_id_value", ) response = client.get_backup(request) @@ -5978,11 +6003,14 @@ def test_get_backup(request_type, transport: str = "grpc"): assert response.database == "database_value" assert response.name == "name_value" assert response.size_bytes == 1089 + assert response.freeable_size_bytes == 2006 + assert response.exclusive_size_bytes == 2168 assert response.state == backup.Backup.State.CREATING assert response.referencing_databases == ["referencing_databases_value"] assert response.database_dialect == common.DatabaseDialect.GOOGLE_STANDARD_SQL assert response.referencing_backups == ["referencing_backups_value"] assert response.backup_schedules == ["backup_schedules_value"] + assert response.incremental_backup_chain_id == "incremental_backup_chain_id_value" def test_get_backup_empty_call(): @@ -6084,11 +6112,14 @@ async def test_get_backup_empty_call_async(): database="database_value", name="name_value", size_bytes=1089, + freeable_size_bytes=2006, + exclusive_size_bytes=2168, state=backup.Backup.State.CREATING, referencing_databases=["referencing_databases_value"], database_dialect=common.DatabaseDialect.GOOGLE_STANDARD_SQL, referencing_backups=["referencing_backups_value"], backup_schedules=["backup_schedules_value"], + incremental_backup_chain_id="incremental_backup_chain_id_value", ) ) response = await client.get_backup() @@ -6118,22 +6149,23 @@ async def test_get_backup_async_use_cached_wrapped_rpc(transport: str = "grpc_as ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.get_backup - ] = mock_object + ] = mock_rpc request = {} await client.get_backup(request) # Establish that the underlying gRPC stub method was called. 
- assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.get_backup(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -6157,11 +6189,14 @@ async def test_get_backup_async( database="database_value", name="name_value", size_bytes=1089, + freeable_size_bytes=2006, + exclusive_size_bytes=2168, state=backup.Backup.State.CREATING, referencing_databases=["referencing_databases_value"], database_dialect=common.DatabaseDialect.GOOGLE_STANDARD_SQL, referencing_backups=["referencing_backups_value"], backup_schedules=["backup_schedules_value"], + incremental_backup_chain_id="incremental_backup_chain_id_value", ) ) response = await client.get_backup(request) @@ -6177,11 +6212,14 @@ async def test_get_backup_async( assert response.database == "database_value" assert response.name == "name_value" assert response.size_bytes == 1089 + assert response.freeable_size_bytes == 2006 + assert response.exclusive_size_bytes == 2168 assert response.state == backup.Backup.State.CREATING assert response.referencing_databases == ["referencing_databases_value"] assert response.database_dialect == common.DatabaseDialect.GOOGLE_STANDARD_SQL assert response.referencing_backups == ["referencing_backups_value"] assert response.backup_schedules == ["backup_schedules_value"] + assert response.incremental_backup_chain_id == "incremental_backup_chain_id_value" @pytest.mark.asyncio @@ -6352,11 +6390,14 @@ def test_update_backup(request_type, transport: str = "grpc"): database="database_value", name="name_value", size_bytes=1089, + freeable_size_bytes=2006, + exclusive_size_bytes=2168, state=gsad_backup.Backup.State.CREATING, referencing_databases=["referencing_databases_value"], database_dialect=common.DatabaseDialect.GOOGLE_STANDARD_SQL, referencing_backups=["referencing_backups_value"], backup_schedules=["backup_schedules_value"], + incremental_backup_chain_id="incremental_backup_chain_id_value", ) response = client.update_backup(request) @@ -6371,11 +6412,14 @@ def test_update_backup(request_type, transport: str = "grpc"): assert response.database == "database_value" assert response.name == "name_value" assert response.size_bytes == 1089 + assert response.freeable_size_bytes == 2006 + assert response.exclusive_size_bytes == 2168 assert response.state == gsad_backup.Backup.State.CREATING assert response.referencing_databases == ["referencing_databases_value"] assert response.database_dialect == common.DatabaseDialect.GOOGLE_STANDARD_SQL assert response.referencing_backups == ["referencing_backups_value"] assert response.backup_schedules == ["backup_schedules_value"] + assert response.incremental_backup_chain_id == "incremental_backup_chain_id_value" def test_update_backup_empty_call(): @@ -6473,11 +6517,14 @@ async def test_update_backup_empty_call_async(): database="database_value", name="name_value", size_bytes=1089, + freeable_size_bytes=2006, + exclusive_size_bytes=2168, state=gsad_backup.Backup.State.CREATING, referencing_databases=["referencing_databases_value"], database_dialect=common.DatabaseDialect.GOOGLE_STANDARD_SQL, referencing_backups=["referencing_backups_value"], backup_schedules=["backup_schedules_value"], + incremental_backup_chain_id="incremental_backup_chain_id_value", ) ) response = await client.update_backup() @@ -6509,22 +6556,23 @@ async def test_update_backup_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with 
mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.update_backup - ] = mock_object + ] = mock_rpc request = {} await client.update_backup(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.update_backup(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -6548,11 +6596,14 @@ async def test_update_backup_async( database="database_value", name="name_value", size_bytes=1089, + freeable_size_bytes=2006, + exclusive_size_bytes=2168, state=gsad_backup.Backup.State.CREATING, referencing_databases=["referencing_databases_value"], database_dialect=common.DatabaseDialect.GOOGLE_STANDARD_SQL, referencing_backups=["referencing_backups_value"], backup_schedules=["backup_schedules_value"], + incremental_backup_chain_id="incremental_backup_chain_id_value", ) ) response = await client.update_backup(request) @@ -6568,11 +6619,14 @@ async def test_update_backup_async( assert response.database == "database_value" assert response.name == "name_value" assert response.size_bytes == 1089 + assert response.freeable_size_bytes == 2006 + assert response.exclusive_size_bytes == 2168 assert response.state == gsad_backup.Backup.State.CREATING assert response.referencing_databases == ["referencing_databases_value"] assert response.database_dialect == common.DatabaseDialect.GOOGLE_STANDARD_SQL assert response.referencing_backups == ["referencing_backups_value"] assert response.backup_schedules == ["backup_schedules_value"] + assert response.incremental_backup_chain_id == "incremental_backup_chain_id_value" @pytest.mark.asyncio @@ -6886,22 +6940,23 @@ async def test_delete_backup_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.delete_backup - ] = mock_object + ] = mock_rpc request = {} await client.delete_backup(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.delete_backup(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -7245,22 +7300,23 @@ async def test_list_backups_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.list_backups - ] = mock_object + ] = mock_rpc request = {} await client.list_backups(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.list_backups(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -7753,8 +7809,9 @@ def test_restore_database_use_cached_wrapped_rpc(): # Establish that the underlying gRPC stub method was called. 
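# Aside (not part of the test diff): the exclusive_size_bytes field asserted above
# could be aggregated across an instance's backups to estimate total backup storage,
# roughly as sketched here. The parent name is a placeholder.
from google.cloud import spanner_admin_database_v1


def total_backup_storage_bytes(parent: str = "projects/my-project/instances/my-instance"):
    client = spanner_admin_database_v1.DatabaseAdminClient()
    return sum(b.exclusive_size_bytes for b in client.list_backups(parent=parent))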
assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() client.restore_database(request) @@ -7808,26 +7865,28 @@ async def test_restore_database_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.restore_database - ] = mock_object + ] = mock_rpc request = {} await client.restore_database(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() await client.restore_database(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -8207,22 +8266,23 @@ async def test_list_database_operations_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.list_database_operations - ] = mock_object + ] = mock_rpc request = {} await client.list_database_operations(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.list_database_operations(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -8803,22 +8863,23 @@ async def test_list_backup_operations_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.list_backup_operations - ] = mock_object + ] = mock_rpc request = {} await client.list_backup_operations(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.list_backup_operations(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -9393,22 +9454,23 @@ async def test_list_database_roles_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.list_database_roles - ] = mock_object + ] = mock_rpc request = {} await client.list_database_roles(request) # Establish that the underlying gRPC stub method was called. 
- assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.list_database_roles(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -9987,22 +10049,23 @@ async def test_create_backup_schedule_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.create_backup_schedule - ] = mock_object + ] = mock_rpc request = {} await client.create_backup_schedule(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.create_backup_schedule(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -10394,22 +10457,23 @@ async def test_get_backup_schedule_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.get_backup_schedule - ] = mock_object + ] = mock_rpc request = {} await client.get_backup_schedule(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.get_backup_schedule(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -10778,22 +10842,23 @@ async def test_update_backup_schedule_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.update_backup_schedule - ] = mock_object + ] = mock_rpc request = {} await client.update_backup_schedule(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.update_backup_schedule(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -11169,22 +11234,23 @@ async def test_delete_backup_schedule_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.delete_backup_schedule - ] = mock_object + ] = mock_rpc request = {} await client.delete_backup_schedule(request) # Establish that the underlying gRPC stub method was called. 
- assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.delete_backup_schedule(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -11550,22 +11616,23 @@ async def test_list_backup_schedules_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.list_backup_schedules - ] = mock_object + ] = mock_rpc request = {} await client.list_backup_schedules(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.list_backup_schedules(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -15251,6 +15318,8 @@ def test_create_backup_rest(request_type): "name": "name_value", "create_time": {}, "size_bytes": 1089, + "freeable_size_bytes": 2006, + "exclusive_size_bytes": 2168, "state": 1, "referencing_databases": [ "referencing_databases_value1", @@ -15278,6 +15347,8 @@ def test_create_backup_rest(request_type): ], "max_expire_time": {}, "backup_schedules": ["backup_schedules_value1", "backup_schedules_value2"], + "incremental_backup_chain_id": "incremental_backup_chain_id_value", + "oldest_version_time": {}, } # The version of a generated dependency at test runtime may differ from the version used during generation. # Delete any fields which are not present in the current runtime dependency @@ -16012,11 +16083,14 @@ def test_get_backup_rest(request_type): database="database_value", name="name_value", size_bytes=1089, + freeable_size_bytes=2006, + exclusive_size_bytes=2168, state=backup.Backup.State.CREATING, referencing_databases=["referencing_databases_value"], database_dialect=common.DatabaseDialect.GOOGLE_STANDARD_SQL, referencing_backups=["referencing_backups_value"], backup_schedules=["backup_schedules_value"], + incremental_backup_chain_id="incremental_backup_chain_id_value", ) # Wrap the value into a proper Response obj @@ -16035,11 +16109,14 @@ def test_get_backup_rest(request_type): assert response.database == "database_value" assert response.name == "name_value" assert response.size_bytes == 1089 + assert response.freeable_size_bytes == 2006 + assert response.exclusive_size_bytes == 2168 assert response.state == backup.Backup.State.CREATING assert response.referencing_databases == ["referencing_databases_value"] assert response.database_dialect == common.DatabaseDialect.GOOGLE_STANDARD_SQL assert response.referencing_backups == ["referencing_backups_value"] assert response.backup_schedules == ["backup_schedules_value"] + assert response.incremental_backup_chain_id == "incremental_backup_chain_id_value" def test_get_backup_rest_use_cached_wrapped_rpc(): @@ -16322,6 +16399,8 @@ def test_update_backup_rest(request_type): "name": "projects/sample1/instances/sample2/backups/sample3", "create_time": {}, "size_bytes": 1089, + "freeable_size_bytes": 2006, + "exclusive_size_bytes": 2168, "state": 1, "referencing_databases": [ "referencing_databases_value1", @@ -16349,6 +16428,8 @@ def test_update_backup_rest(request_type): ], "max_expire_time": {}, "backup_schedules": ["backup_schedules_value1", 
"backup_schedules_value2"], + "incremental_backup_chain_id": "incremental_backup_chain_id_value", + "oldest_version_time": {}, } # The version of a generated dependency at test runtime may differ from the version used during generation. # Delete any fields which are not present in the current runtime dependency @@ -16426,11 +16507,14 @@ def get_message_fields(field): database="database_value", name="name_value", size_bytes=1089, + freeable_size_bytes=2006, + exclusive_size_bytes=2168, state=gsad_backup.Backup.State.CREATING, referencing_databases=["referencing_databases_value"], database_dialect=common.DatabaseDialect.GOOGLE_STANDARD_SQL, referencing_backups=["referencing_backups_value"], backup_schedules=["backup_schedules_value"], + incremental_backup_chain_id="incremental_backup_chain_id_value", ) # Wrap the value into a proper Response obj @@ -16449,11 +16533,14 @@ def get_message_fields(field): assert response.database == "database_value" assert response.name == "name_value" assert response.size_bytes == 1089 + assert response.freeable_size_bytes == 2006 + assert response.exclusive_size_bytes == 2168 assert response.state == gsad_backup.Backup.State.CREATING assert response.referencing_databases == ["referencing_databases_value"] assert response.database_dialect == common.DatabaseDialect.GOOGLE_STANDARD_SQL assert response.referencing_backups == ["referencing_backups_value"] assert response.backup_schedules == ["backup_schedules_value"] + assert response.incremental_backup_chain_id == "incremental_backup_chain_id_value" def test_update_backup_rest_use_cached_wrapped_rpc(): @@ -18890,6 +18977,7 @@ def test_create_backup_schedule_rest(request_type): "kms_key_names": ["kms_key_names_value1", "kms_key_names_value2"], }, "full_backup_spec": {}, + "incremental_backup_spec": {}, "update_time": {"seconds": 751, "nanos": 543}, } # The version of a generated dependency at test runtime may differ from the version used during generation. @@ -19634,6 +19722,7 @@ def test_update_backup_schedule_rest(request_type): "kms_key_names": ["kms_key_names_value1", "kms_key_names_value2"], }, "full_backup_spec": {}, + "incremental_backup_spec": {}, "update_time": {"seconds": 751, "nanos": 543}, } # The version of a generated dependency at test runtime may differ from the version used during generation. diff --git a/tests/unit/gapic/spanner_admin_instance_v1/test_instance_admin.py b/tests/unit/gapic/spanner_admin_instance_v1/test_instance_admin.py index 4550c4a585..e150adcf1c 100644 --- a/tests/unit/gapic/spanner_admin_instance_v1/test_instance_admin.py +++ b/tests/unit/gapic/spanner_admin_instance_v1/test_instance_admin.py @@ -1314,22 +1314,23 @@ async def test_list_instance_configs_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.list_instance_configs - ] = mock_object + ] = mock_rpc request = {} await client.list_instance_configs(request) # Establish that the underlying gRPC stub method was called. 
- assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.list_instance_configs(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -1931,22 +1932,23 @@ async def test_get_instance_config_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.get_instance_config - ] = mock_object + ] = mock_rpc request = {} await client.get_instance_config(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.get_instance_config(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -2280,8 +2282,9 @@ def test_create_instance_config_use_cached_wrapped_rpc(): # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() client.create_instance_config(request) @@ -2337,26 +2340,28 @@ async def test_create_instance_config_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.create_instance_config - ] = mock_object + ] = mock_rpc request = {} await client.create_instance_config(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() await client.create_instance_config(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -2684,8 +2689,9 @@ def test_update_instance_config_use_cached_wrapped_rpc(): # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. 
+ # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() client.update_instance_config(request) @@ -2741,26 +2747,28 @@ async def test_update_instance_config_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.update_instance_config - ] = mock_object + ] = mock_rpc request = {} await client.update_instance_config(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() await client.update_instance_config(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -3135,22 +3143,23 @@ async def test_delete_instance_config_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.delete_instance_config - ] = mock_object + ] = mock_rpc request = {} await client.delete_instance_config(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.delete_instance_config(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -3518,22 +3527,23 @@ async def test_list_instance_config_operations_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.list_instance_config_operations - ] = mock_object + ] = mock_rpc request = {} await client.list_instance_config_operations(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.list_instance_config_operations(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -4110,22 +4120,23 @@ async def test_list_instances_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.list_instances - ] = mock_object + ] = mock_rpc request = {} await client.list_instances(request) # Establish that the underlying gRPC stub method was called. 
- assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.list_instances(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -4689,22 +4700,23 @@ async def test_list_instance_partitions_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.list_instance_partitions - ] = mock_object + ] = mock_rpc request = {} await client.list_instance_partitions(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.list_instance_partitions(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -5138,6 +5150,7 @@ def test_get_instance(request_type, transport: str = "grpc"): processing_units=1743, state=spanner_instance_admin.Instance.State.CREATING, endpoint_uris=["endpoint_uris_value"], + edition=spanner_instance_admin.Instance.Edition.STANDARD, ) response = client.get_instance(request) @@ -5156,6 +5169,7 @@ def test_get_instance(request_type, transport: str = "grpc"): assert response.processing_units == 1743 assert response.state == spanner_instance_admin.Instance.State.CREATING assert response.endpoint_uris == ["endpoint_uris_value"] + assert response.edition == spanner_instance_admin.Instance.Edition.STANDARD def test_get_instance_empty_call(): @@ -5261,6 +5275,7 @@ async def test_get_instance_empty_call_async(): processing_units=1743, state=spanner_instance_admin.Instance.State.CREATING, endpoint_uris=["endpoint_uris_value"], + edition=spanner_instance_admin.Instance.Edition.STANDARD, ) ) response = await client.get_instance() @@ -5292,22 +5307,23 @@ async def test_get_instance_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.get_instance - ] = mock_object + ] = mock_rpc request = {} await client.get_instance(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.get_instance(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -5336,6 +5352,7 @@ async def test_get_instance_async( processing_units=1743, state=spanner_instance_admin.Instance.State.CREATING, endpoint_uris=["endpoint_uris_value"], + edition=spanner_instance_admin.Instance.Edition.STANDARD, ) ) response = await client.get_instance(request) @@ -5355,6 +5372,7 @@ async def test_get_instance_async( assert response.processing_units == 1743 assert response.state == spanner_instance_admin.Instance.State.CREATING assert response.endpoint_uris == ["endpoint_uris_value"] + assert response.edition == spanner_instance_admin.Instance.Edition.STANDARD @pytest.mark.asyncio @@ -5615,8 +5633,9 @@ def test_create_instance_use_cached_wrapped_rpc(): # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() client.create_instance(request) @@ -5670,26 +5689,28 @@ async def test_create_instance_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.create_instance - ] = mock_object + ] = mock_rpc request = {} await client.create_instance(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() await client.create_instance(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -5996,8 +6017,9 @@ def test_update_instance_use_cached_wrapped_rpc(): # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() client.update_instance(request) @@ -6051,26 +6073,28 @@ async def test_update_instance_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.update_instance - ] = mock_object + ] = mock_rpc request = {} await client.update_instance(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() await client.update_instance(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -6420,22 +6444,23 @@ async def test_delete_instance_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.delete_instance - ] = mock_object + ] = mock_rpc request = {} await client.delete_instance(request) # Establish that the underlying gRPC stub method was called. 
- assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.delete_instance(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -6779,22 +6804,23 @@ async def test_set_iam_policy_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.set_iam_policy - ] = mock_object + ] = mock_rpc request = {} await client.set_iam_policy(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.set_iam_policy(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -7162,22 +7188,23 @@ async def test_get_iam_policy_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.get_iam_policy - ] = mock_object + ] = mock_rpc request = {} await client.get_iam_policy(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.get_iam_policy(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -7553,22 +7580,23 @@ async def test_test_iam_permissions_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.test_iam_permissions - ] = mock_object + ] = mock_rpc request = {} await client.test_iam_permissions(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.test_iam_permissions(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -7989,22 +8017,23 @@ async def test_get_instance_partition_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.get_instance_partition - ] = mock_object + ] = mock_rpc request = {} await client.get_instance_partition(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.get_instance_partition(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -8333,8 +8362,9 @@ def test_create_instance_partition_use_cached_wrapped_rpc(): # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() client.create_instance_partition(request) @@ -8390,26 +8420,28 @@ async def test_create_instance_partition_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.create_instance_partition - ] = mock_object + ] = mock_rpc request = {} await client.create_instance_partition(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() await client.create_instance_partition(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -8802,22 +8834,23 @@ async def test_delete_instance_partition_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.delete_instance_partition - ] = mock_object + ] = mock_rpc request = {} await client.delete_instance_partition(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.delete_instance_partition(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -9119,8 +9152,9 @@ def test_update_instance_partition_use_cached_wrapped_rpc(): # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() client.update_instance_partition(request) @@ -9176,26 +9210,28 @@ async def test_update_instance_partition_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.update_instance_partition - ] = mock_object + ] = mock_rpc request = {} await client.update_instance_partition(request) # Establish that the underlying gRPC stub method was called. 
- assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() await client.update_instance_partition(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -9602,22 +9638,23 @@ async def test_list_instance_partition_operations_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.list_instance_partition_operations - ] = mock_object + ] = mock_rpc request = {} await client.list_instance_partition_operations(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.list_instance_partition_operations(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -10032,52 +10069,92 @@ async def test_list_instance_partition_operations_async_pages(): @pytest.mark.parametrize( "request_type", [ - spanner_instance_admin.ListInstanceConfigsRequest, + spanner_instance_admin.MoveInstanceRequest, dict, ], ) -def test_list_instance_configs_rest(request_type): +def test_move_instance(request_type, transport: str = "grpc"): client = InstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport=transport, ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1"} - request = request_type(**request_init) + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = spanner_instance_admin.ListInstanceConfigsResponse( - next_page_token="next_page_token_value", - ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.move_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.move_instance(request) - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = spanner_instance_admin.ListInstanceConfigsResponse.pb( - return_value + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = spanner_instance_admin.MoveInstanceRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +def test_move_instance_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = InstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.move_instance), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. ) - json_return_value = json_format.MessageToJson(return_value) + client.move_instance() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == spanner_instance_admin.MoveInstanceRequest() - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.list_instance_configs(request) - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListInstanceConfigsPager) - assert response.next_page_token == "next_page_token_value" +def test_move_instance_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = InstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = spanner_instance_admin.MoveInstanceRequest( + name="name_value", + target_config="target_config_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.move_instance), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.move_instance(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == spanner_instance_admin.MoveInstanceRequest( + name="name_value", + target_config="target_config_value", + ) -def test_list_instance_configs_rest_use_cached_wrapped_rpc(): +def test_move_instance_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: client = InstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport="grpc", ) # Should wrap all calls on client creation @@ -10085,76 +10162,324 @@ def test_list_instance_configs_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert ( - client._transport.list_instance_configs - in client._transport._wrapped_methods - ) + assert client._transport.move_instance in client._transport._wrapped_methods # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[ - client._transport.list_instance_configs - ] = mock_rpc - + client._transport._wrapped_methods[client._transport.move_instance] = mock_rpc request = {} - client.list_instance_configs(request) + client.move_instance(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - client.list_instance_configs(request) + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.move_instance(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_list_instance_configs_rest_required_fields( - request_type=spanner_instance_admin.ListInstanceConfigsRequest, -): - transport_class = transports.InstanceAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) +@pytest.mark.asyncio +async def test_move_instance_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = InstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", ) - # verify fields with default values are dropped + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.move_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.move_instance() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == spanner_instance_admin.MoveInstanceRequest() - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_instance_configs._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - # verify required fields with default values are now present +@pytest.mark.asyncio +async def test_move_instance_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = InstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) - jsonified_request["parent"] = "parent_value" + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_instance_configs._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. 
- assert not set(unset_fields) - set( - ( - "page_size", - "page_token", + # Ensure method has been cached + assert ( + client._client._transport.move_instance + in client._client._transport._wrapped_methods ) - ) - jsonified_request.update(unset_fields) - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.move_instance + ] = mock_rpc - client = InstanceAdminClient( + request = {} + await client.move_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.move_instance(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_move_instance_async( + transport: str = "grpc_asyncio", + request_type=spanner_instance_admin.MoveInstanceRequest, +): + client = InstanceAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.move_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.move_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = spanner_instance_admin.MoveInstanceRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_move_instance_async_from_dict(): + await test_move_instance_async(request_type=dict) + + +def test_move_instance_field_headers(): + client = InstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner_instance_admin.MoveInstanceRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.move_instance), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.move_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_move_instance_field_headers_async(): + client = InstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner_instance_admin.MoveInstanceRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.move_instance), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.move_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.parametrize( + "request_type", + [ + spanner_instance_admin.ListInstanceConfigsRequest, + dict, + ], +) +def test_list_instance_configs_rest(request_type): + client = InstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = spanner_instance_admin.ListInstanceConfigsResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = spanner_instance_admin.ListInstanceConfigsResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.list_instance_configs(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListInstanceConfigsPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_instance_configs_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = InstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.list_instance_configs + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_instance_configs + ] = mock_rpc + + request = {} + client.list_instance_configs(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.list_instance_configs(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_list_instance_configs_rest_required_fields( + request_type=spanner_instance_admin.ListInstanceConfigsRequest, +): + transport_class = transports.InstanceAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_instance_configs._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_instance_configs._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = InstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) request = request_type(**request_init) @@ -12893,6 +13218,7 @@ def test_get_instance_rest(request_type): processing_units=1743, state=spanner_instance_admin.Instance.State.CREATING, endpoint_uris=["endpoint_uris_value"], + edition=spanner_instance_admin.Instance.Edition.STANDARD, ) # Wrap the value into a proper Response obj @@ -12915,6 +13241,7 @@ def test_get_instance_rest(request_type): assert response.processing_units == 1743 assert response.state == spanner_instance_admin.Instance.State.CREATING assert response.endpoint_uris == ["endpoint_uris_value"] + assert response.edition == spanner_instance_admin.Instance.Edition.STANDARD def test_get_instance_rest_use_cached_wrapped_rpc(): @@ -16691,6 +17018,263 @@ def test_list_instance_partition_operations_rest_pager(transport: str = "rest"): assert page_.raw_page.next_page_token == token +@pytest.mark.parametrize( + "request_type", + [ + spanner_instance_admin.MoveInstanceRequest, + dict, + ], +) +def test_move_instance_rest(request_type): + client = InstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.move_instance(request) + + # Establish that the response is the type that we expect. 
+ assert response.operation.name == "operations/spam" + + +def test_move_instance_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = InstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.move_instance in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.move_instance] = mock_rpc + + request = {} + client.move_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.move_instance(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_move_instance_rest_required_fields( + request_type=spanner_instance_admin.MoveInstanceRequest, +): + transport_class = transports.InstanceAdminRestTransport + + request_init = {} + request_init["name"] = "" + request_init["target_config"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).move_instance._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + jsonified_request["targetConfig"] = "target_config_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).move_instance._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + assert "targetConfig" in jsonified_request + assert jsonified_request["targetConfig"] == "target_config_value" + + client = InstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.move_instance(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_move_instance_rest_unset_required_fields(): + transport = transports.InstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.move_instance._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "name", + "targetConfig", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_move_instance_rest_interceptors(null_interceptor): + transport = transports.InstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.InstanceAdminRestInterceptor(), + ) + client = InstanceAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.InstanceAdminRestInterceptor, "post_move_instance" + ) as post, mock.patch.object( + transports.InstanceAdminRestInterceptor, "pre_move_instance" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = spanner_instance_admin.MoveInstanceRequest.pb( + spanner_instance_admin.MoveInstanceRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() + ) + + request = spanner_instance_admin.MoveInstanceRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.move_instance( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_move_instance_rest_bad_request( + transport: str = "rest", request_type=spanner_instance_admin.MoveInstanceRequest +): + client = InstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.move_instance(request) + + +def test_move_instance_rest_error(): + client = InstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + def test_credentials_transport_error(): # It is an error to provide credentials and a transport instance. transport = transports.InstanceAdminGrpcTransport( @@ -16850,6 +17434,7 @@ def test_instance_admin_base_transport(): "delete_instance_partition", "update_instance_partition", "list_instance_partition_operations", + "move_instance", ) for method in methods: with pytest.raises(NotImplementedError): @@ -17202,6 +17787,9 @@ def test_instance_admin_client_transport_session_collision(transport_name): session1 = client1.transport.list_instance_partition_operations._session session2 = client2.transport.list_instance_partition_operations._session assert session1 != session2 + session1 = client1.transport.move_instance._session + session2 = client2.transport.move_instance._session + assert session1 != session2 def test_instance_admin_grpc_transport_channel(): diff --git a/tests/unit/gapic/spanner_v1/test_spanner.py b/tests/unit/gapic/spanner_v1/test_spanner.py index 70ba97827e..d49f450e86 100644 --- a/tests/unit/gapic/spanner_v1/test_spanner.py +++ b/tests/unit/gapic/spanner_v1/test_spanner.py @@ -1236,22 +1236,23 @@ async def test_create_session_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.create_session - ] = mock_object + ] = mock_rpc request = {} await client.create_session(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.create_session(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -1608,22 +1609,23 @@ async def test_batch_create_sessions_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.batch_create_sessions - ] = mock_object + ] = mock_rpc request = {} await client.batch_create_sessions(request) # Establish that the underlying gRPC stub method was called. 
- assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.batch_create_sessions(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -1995,22 +1997,23 @@ async def test_get_session_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.get_session - ] = mock_object + ] = mock_rpc request = {} await client.get_session(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.get_session(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -2363,22 +2366,23 @@ async def test_list_sessions_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.list_sessions - ] = mock_object + ] = mock_rpc request = {} await client.list_sessions(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.list_sessions(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -2914,22 +2918,23 @@ async def test_delete_session_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.delete_session - ] = mock_object + ] = mock_rpc request = {} await client.delete_session(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.delete_session(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -3266,22 +3271,23 @@ async def test_execute_sql_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.execute_sql - ] = mock_object + ] = mock_rpc request = {} await client.execute_sql(request) # Establish that the underlying gRPC stub method was called. 
- assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.execute_sql(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -3557,22 +3563,23 @@ async def test_execute_streaming_sql_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.execute_streaming_sql - ] = mock_object + ] = mock_rpc request = {} await client.execute_streaming_sql(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.execute_streaming_sql(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -3850,22 +3857,23 @@ async def test_execute_batch_dml_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.execute_batch_dml - ] = mock_object + ] = mock_rpc request = {} await client.execute_batch_dml(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.execute_batch_dml(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -4131,22 +4139,23 @@ async def test_read_async_use_cached_wrapped_rpc(transport: str = "grpc_asyncio" ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.read - ] = mock_object + ] = mock_rpc request = {} await client.read(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.read(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -4411,22 +4420,23 @@ async def test_streaming_read_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.streaming_read - ] = mock_object + ] = mock_rpc request = {} await client.streaming_read(request) # Establish that the underlying gRPC stub method was called. 
- assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.streaming_read(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -4703,22 +4713,23 @@ async def test_begin_transaction_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.begin_transaction - ] = mock_object + ] = mock_rpc request = {} await client.begin_transaction(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.begin_transaction(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -5104,22 +5115,23 @@ async def test_commit_async_use_cached_wrapped_rpc(transport: str = "grpc_asynci ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.commit - ] = mock_object + ] = mock_rpc request = {} await client.commit(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.commit(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -5512,22 +5524,23 @@ async def test_rollback_async_use_cached_wrapped_rpc(transport: str = "grpc_asyn ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.rollback - ] = mock_object + ] = mock_rpc request = {} await client.rollback(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.rollback(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -5874,22 +5887,23 @@ async def test_partition_query_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.partition_query - ] = mock_object + ] = mock_rpc request = {} await client.partition_query(request) # Establish that the underlying gRPC stub method was called. 
- assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.partition_query(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -6152,22 +6166,23 @@ async def test_partition_read_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.partition_read - ] = mock_object + ] = mock_rpc request = {} await client.partition_read(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.partition_read(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -6428,22 +6443,23 @@ async def test_batch_write_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.batch_write - ] = mock_object + ] = mock_rpc request = {} await client.batch_write(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.batch_write(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio
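
Note (not part of the diff): the hunks above add REST-transport coverage for the new MoveInstance RPC on InstanceAdmin (cached wrapped-RPC caching, required fields name/targetConfig, interceptors, bad-request handling), assert the new Instance edition field, and rename the spanner_v1 async cached-RPC test double from mock_object to mock_rpc with an explicit plain-Mock return value. As a rough usage sketch only, and assuming the usual GAPIC package layout where the client and request types are re-exported from google.cloud.spanner_admin_instance_v1, the new method could be driven roughly as below. The project, instance, and target instance-config names are placeholders, not values taken from this change.

    # Hedged sketch: resource names are placeholders; real calls need valid
    # credentials, an existing instance, and a reachable target config.
    from google.cloud import spanner_admin_instance_v1

    client = spanner_admin_instance_v1.InstanceAdminClient()

    request = spanner_admin_instance_v1.MoveInstanceRequest(
        name="projects/my-project/instances/my-instance",
        target_config="projects/my-project/instanceConfigs/nam3",
    )

    # The tests mock the response as a long-running operation, so the client
    # method is expected to return an operation future; block until it is done.
    operation = client.move_instance(request=request)
    result = operation.result()

The unit tests above only verify transcoding, required-field handling, and interceptor wiring against mocked HTTP sessions; they do not exercise a real move, so the sketch's runtime behavior (duration, returned result type) is not covered by this diff.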