
Commit 43059ed

gcf-owl-bot[bot] and parthea authored and committed
feat: add ResourceExhausted to retryable error for Write API unary calls (#612)
* feat: add ResourceExhausted to retryable error for Write API unary calls
  docs: add multiplexing documentation

  PiperOrigin-RevId: 545839491
  Source-Link: googleapis/googleapis@2b006af
  Source-Link: googleapis/googleapis-gen@0d52d38
  Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMGQ1MmQzODViZDRlNzhjN2IyYzgzNzU1MDEzZmUxMDNlODA0YzM4NCJ9

* 🦉 Updates from OwlBot post-processor

  See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md

---

Co-authored-by: Owl Bot <gcf-owl-bot[bot]@users.noreply.github.com>
1 parent 30da01e commit 43059ed
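
For orientation, here is a minimal sketch of the retry policy shape these diffs produce for the Write API unary calls (GetWriteStream, FinalizeWriteStream, BatchCommitWriteStreams, FlushRows). The `initial` and `maximum` backoff values are assumptions, not taken from this commit; the predicate mirrors the generated code in the diffs below.

```python
from google.api_core import exceptions as core_exceptions
from google.api_core import retry as retries

# Sketch of the default retry now applied to Write API unary calls.
# initial/maximum are illustrative values, not taken from this commit.
default_write_retry = retries.Retry(
    initial=0.1,
    maximum=60.0,
    multiplier=1.3,
    predicate=retries.if_exception_type(
        core_exceptions.DeadlineExceeded,
        core_exceptions.ResourceExhausted,  # newly retryable as of this change
        core_exceptions.ServiceUnavailable,
    ),
    deadline=600.0,
)
```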

File tree

7 files changed: +68 −35 lines changed

packages/google-cloud-bigquery-storage/google/cloud/bigquery_storage_v1/services/big_query_write/async_client.py

Lines changed: 9 additions & 4 deletions

@@ -437,10 +437,11 @@ def request_generator():
     requests (AsyncIterator[`google.cloud.bigquery_storage_v1.types.AppendRowsRequest`]):
         The request object AsyncIterator. Request message for ``AppendRows``.

-        Due to the nature of AppendRows being a bidirectional
-        streaming RPC, certain parts of the AppendRowsRequest
-        need only be specified for the first request sent each
-        time the gRPC network connection is opened/reopened.
+        Because AppendRows is a bidirectional streaming RPC,
+        certain parts of the AppendRowsRequest need only be
+        specified for the first request before switching table
+        destinations. You can also switch table destinations
+        within the same connection for the default stream.

         The size of a single AppendRowsRequest must be less than
         10 MB in size. Requests larger than this return an
@@ -575,6 +576,7 @@ async def sample_get_write_stream():
     multiplier=1.3,
     predicate=retries.if_exception_type(
         core_exceptions.DeadlineExceeded,
+        core_exceptions.ResourceExhausted,
         core_exceptions.ServiceUnavailable,
     ),
     deadline=600.0,
@@ -685,6 +687,7 @@ async def sample_finalize_write_stream():
     multiplier=1.3,
     predicate=retries.if_exception_type(
         core_exceptions.DeadlineExceeded,
+        core_exceptions.ResourceExhausted,
         core_exceptions.ServiceUnavailable,
     ),
     deadline=600.0,
@@ -801,6 +804,7 @@ async def sample_batch_commit_write_streams():
     multiplier=1.3,
     predicate=retries.if_exception_type(
         core_exceptions.DeadlineExceeded,
+        core_exceptions.ResourceExhausted,
         core_exceptions.ServiceUnavailable,
     ),
     deadline=600.0,
@@ -919,6 +923,7 @@ async def sample_flush_rows():
     multiplier=1.3,
     predicate=retries.if_exception_type(
         core_exceptions.DeadlineExceeded,
+        core_exceptions.ResourceExhausted,
         core_exceptions.ServiceUnavailable,
     ),
     deadline=600.0,
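
The docstring changed above belongs to the bidi streaming `append_rows` method on the async client. A hedged sketch of how a caller drives it, with the stream path as a placeholder and row payloads elided (a real request also carries `proto_rows` data):

```python
from google.cloud import bigquery_storage_v1


async def sample_append_rows():
    client = bigquery_storage_v1.BigQueryWriteAsyncClient()

    def request_generator():
        # Per the updated docstring, write_stream (and, for proto data,
        # writer_schema) need only be set on the first request until the
        # table destination changes. Row payloads are elided here.
        yield bigquery_storage_v1.AppendRowsRequest(
            write_stream="projects/p/datasets/d/tables/t/streams/_default",
        )
        yield bigquery_storage_v1.AppendRowsRequest()  # same destination

    stream = await client.append_rows(requests=request_generator())
    async for response in stream:
        print(response)
```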

packages/google-cloud-bigquery-storage/google/cloud/bigquery_storage_v1/services/big_query_write/client.py

Lines changed: 5 additions & 4 deletions

@@ -672,10 +672,11 @@ def request_generator():
     requests (Iterator[google.cloud.bigquery_storage_v1.types.AppendRowsRequest]):
         The request object iterator. Request message for ``AppendRows``.

-        Due to the nature of AppendRows being a bidirectional
-        streaming RPC, certain parts of the AppendRowsRequest
-        need only be specified for the first request sent each
-        time the gRPC network connection is opened/reopened.
+        Because AppendRows is a bidirectional streaming RPC,
+        certain parts of the AppendRowsRequest need only be
+        specified for the first request before switching table
+        destinations. You can also switch table destinations
+        within the same connection for the default stream.

         The size of a single AppendRowsRequest must be less than
         10 MB in size. Requests larger than this return an

packages/google-cloud-bigquery-storage/google/cloud/bigquery_storage_v1/services/big_query_write/transports/base.py

Lines changed: 4 additions & 0 deletions

@@ -166,6 +166,7 @@ def _prep_wrapped_messages(self, client_info):
     multiplier=1.3,
     predicate=retries.if_exception_type(
         core_exceptions.DeadlineExceeded,
+        core_exceptions.ResourceExhausted,
         core_exceptions.ServiceUnavailable,
     ),
     deadline=600.0,
@@ -181,6 +182,7 @@ def _prep_wrapped_messages(self, client_info):
     multiplier=1.3,
     predicate=retries.if_exception_type(
         core_exceptions.DeadlineExceeded,
+        core_exceptions.ResourceExhausted,
         core_exceptions.ServiceUnavailable,
     ),
     deadline=600.0,
@@ -196,6 +198,7 @@ def _prep_wrapped_messages(self, client_info):
     multiplier=1.3,
     predicate=retries.if_exception_type(
         core_exceptions.DeadlineExceeded,
+        core_exceptions.ResourceExhausted,
         core_exceptions.ServiceUnavailable,
     ),
     deadline=600.0,
@@ -211,6 +214,7 @@ def _prep_wrapped_messages(self, client_info):
     multiplier=1.3,
     predicate=retries.if_exception_type(
         core_exceptions.DeadlineExceeded,
+        core_exceptions.ResourceExhausted,
         core_exceptions.ServiceUnavailable,
     ),
     deadline=600.0,
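
These defaults are installed by the transport's `_prep_wrapped_messages`; a caller can still override them per invocation. A sketch, with the stream name as a placeholder:

```python
from google.api_core import exceptions as core_exceptions
from google.api_core import retry as retries
from google.cloud import bigquery_storage_v1

client = bigquery_storage_v1.BigQueryWriteClient()

# Per-call override; without it, the wrapped defaults above apply,
# which now retry on ResourceExhausted as well.
custom_retry = retries.Retry(
    predicate=retries.if_exception_type(core_exceptions.ServiceUnavailable),
    deadline=120.0,
)
write_stream = client.get_write_stream(
    name="projects/p/datasets/d/tables/t/streams/s",  # placeholder
    retry=custom_retry,
)
```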

packages/google-cloud-bigquery-storage/google/cloud/bigquery_storage_v1/types/storage.py

Lines changed: 42 additions & 18 deletions

@@ -358,10 +358,11 @@ class CreateWriteStreamRequest(proto.Message):
 class AppendRowsRequest(proto.Message):
     r"""Request message for ``AppendRows``.

-    Due to the nature of AppendRows being a bidirectional streaming RPC,
-    certain parts of the AppendRowsRequest need only be specified for
-    the first request sent each time the gRPC network connection is
-    opened/reopened.
+    Because AppendRows is a bidirectional streaming RPC, certain parts
+    of the AppendRowsRequest need only be specified for the first
+    request before switching table destinations. You can also switch
+    table destinations within the same connection for the default
+    stream.

     The size of a single AppendRowsRequest must be less than 10 MB in
     size. Requests larger than this return an error, typically
@@ -372,11 +373,14 @@ class AppendRowsRequest(proto.Message):

     Attributes:
         write_stream (str):
-            Required. The write_stream identifies the target of the
-            append operation, and only needs to be specified as part of
-            the first request on the gRPC connection. If provided for
-            subsequent requests, it must match the value of the first
-            request.
+            Required. The write_stream identifies the append operation.
+            It must be provided in the following scenarios:
+
+            - In the first request to an AppendRows connection.
+
+            - In all subsequent requests to an AppendRows connection,
+              if you use the same connection to write to multiple
+              tables or change the input schema for default streams.

             For explicitly created write streams, the format is:

@@ -385,6 +389,23 @@
             For the special default stream, the format is:

             - ``projects/{project}/datasets/{dataset}/tables/{table}/streams/_default``.
+
+            An example of a possible sequence of requests with
+            write_stream fields within a single connection:
+
+            - r1: {write_stream: stream_name_1}
+
+            - r2: {write_stream: /*omit*/}
+
+            - r3: {write_stream: /*omit*/}
+
+            - r4: {write_stream: stream_name_2}
+
+            - r5: {write_stream: stream_name_2}
+
+            The destination changed in request_4, so the write_stream
+            field must be populated in all subsequent requests in this
+            stream.
         offset (google.protobuf.wrappers_pb2.Int64Value):
             If present, the write is only performed if the next append
             offset is same as the provided value. If not present, the
@@ -420,10 +441,10 @@ class AppendRowsRequest(proto.Message):
     """

     class MissingValueInterpretation(proto.Enum):
-        r"""An enum to indicate how to interpret missing values. Missing
-        values are fields present in user schema but missing in rows. A
-        missing value can represent a NULL or a column default value
-        defined in BigQuery table schema.
+        r"""An enum to indicate how to interpret missing values of fields
+        that are present in user schema but missing in rows. A missing
+        value can represent a NULL or a column default value defined in
+        BigQuery table schema.

         Values:
             MISSING_VALUE_INTERPRETATION_UNSPECIFIED (0):
@@ -446,11 +467,14 @@ class ProtoData(proto.Message):

     Attributes:
         writer_schema (google.cloud.bigquery_storage_v1.types.ProtoSchema):
-            Proto schema used to serialize the data.
-            This value only needs to be provided as part of
-            the first request on a gRPC network connection,
-            and will be ignored for subsequent requests on
-            the connection.
+            The protocol buffer schema used to serialize the data.
+            Provide this value whenever:
+
+            - You send the first request of an RPC connection.
+
+            - You change the input schema.
+
+            - You specify a new destination table.
         rows (google.cloud.bigquery_storage_v1.types.ProtoRows):
             Serialized row data in protobuf message
            format. Currently, the backend expects the
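
The r1–r5 sequence in the new docstring maps onto a request generator like the following sketch; the stream names are placeholders and row payloads are elided:

```python
from google.cloud import bigquery_storage_v1

stream_name_1 = "projects/p/datasets/d/tables/t1/streams/_default"  # placeholder
stream_name_2 = "projects/p/datasets/d/tables/t2/streams/_default"  # placeholder


def multiplexed_requests():
    # r1: the first request on the connection names the destination
    # (and, for proto data, would also carry writer_schema).
    yield bigquery_storage_v1.AppendRowsRequest(write_stream=stream_name_1)
    # r2, r3: same destination, so write_stream may be omitted.
    yield bigquery_storage_v1.AppendRowsRequest()
    yield bigquery_storage_v1.AppendRowsRequest()
    # r4: the destination changes, so write_stream must be set again...
    yield bigquery_storage_v1.AppendRowsRequest(write_stream=stream_name_2)
    # r5: ...and on every subsequent request in this stream.
    yield bigquery_storage_v1.AppendRowsRequest(write_stream=stream_name_2)
```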

packages/google-cloud-bigquery-storage/google/cloud/bigquery_storage_v1/types/stream.py

Lines changed: 6 additions & 7 deletions

@@ -136,13 +136,12 @@ class ReadSession(proto.Message):
             incomplete or stale.
         estimated_total_physical_file_size (int):
             Output only. A pre-projected estimate of the
-            total physical size (in bytes) of files this
-            session will scan when all streams are
-            completely consumed. This estimate does not
-            depend on the selected columns and can be based
-            on metadata from the table which might be
-            incomplete or stale. Only set for BigLake
-            tables.
+            total physical size of files (in bytes) that
+            this session will scan when all streams are
+            consumed. This estimate is independent of the
+            selected columns and can be based on incomplete
+            or stale metadata from the table. This field is
+            only set for BigLake tables.
         estimated_row_count (int):
             Output only. An estimate on the number of
             rows present in this session's streams. This

packages/google-cloud-bigquery-storage/samples/generated_samples/snippet_metadata_google.cloud.bigquery.storage.v1.json

Lines changed: 1 addition & 1 deletion

@@ -8,7 +8,7 @@
     ],
     "language": "PYTHON",
     "name": "google-cloud-bigquery-storage",
-    "version": "2.21.0"
+    "version": "0.1.0"
   },
   "snippets": [
     {

packages/google-cloud-bigquery-storage/samples/generated_samples/snippet_metadata_google.cloud.bigquery.storage.v1beta2.json

Lines changed: 1 addition & 1 deletion

@@ -8,7 +8,7 @@
     ],
     "language": "PYTHON",
     "name": "google-cloud-bigquery-storage",
-    "version": "2.21.0"
+    "version": "0.1.0"
   },
   "snippets": [
     {
