diff --git a/docs/index.rst b/docs/index.rst index ee47a2ac378f..d9afe5f31af4 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -8,7 +8,7 @@ datastore/usage dns/usage language/usage - pubsub/usage + pubsub/index resource-manager/api runtimeconfig/usage spanner/usage diff --git a/docs/pubsub/client.rst b/docs/pubsub/client.rst deleted file mode 100644 index 2745c1d808ee..000000000000 --- a/docs/pubsub/client.rst +++ /dev/null @@ -1,6 +0,0 @@ -Pub/Sub Client -============== - -.. automodule:: google.cloud.pubsub.client - :members: - :show-inheritance: diff --git a/docs/pubsub/iam.rst b/docs/pubsub/iam.rst deleted file mode 100644 index 26943762605b..000000000000 --- a/docs/pubsub/iam.rst +++ /dev/null @@ -1,7 +0,0 @@ -IAM Policy -~~~~~~~~~~ - -.. automodule:: google.cloud.pubsub.iam - :members: - :member-order: bysource - :show-inheritance: diff --git a/docs/pubsub/index.rst b/docs/pubsub/index.rst new file mode 100644 index 000000000000..7b7438b29f9c --- /dev/null +++ b/docs/pubsub/index.rst @@ -0,0 +1,117 @@ +####### +Pub/Sub +####### + +`Google Cloud Pub/Sub`_ is a fully-managed real-time messaging service that +allows you to send and receive messages between independent applications. You +can leverage Cloud Pub/Sub’s flexibility to decouple systems and components +hosted on Google Cloud Platform or elsewhere on the Internet. By building on +the same technology Google uses, Cloud Pub/Sub is designed to provide “at +least once” delivery at low latency with on-demand scalability to 1 million +messages per second (and beyond). + +.. _Google Cloud Pub/Sub: https://cloud.google.com/pubsub/ + +******************************** +Authentication and Configuration +******************************** + +- For an overview of authentication in ``google-cloud-python``, + see :doc:`/core/auth`. + +- In addition to any authentication configuration, you should also set the + :envvar:`GOOGLE_CLOUD_PROJECT` environment variable for the project you'd + like to interact with. 
If the :envvar:`GOOGLE_CLOUD_PROJECT` environment + variable is not present, the project ID from JSON file credentials is used. + + If you are using Google App Engine or Google Compute Engine + this will be detected automatically. + +- After configuring your environment, create a + :class:`~google.cloud.pubsub_v1.PublisherClient` or + :class:`~google.cloud.pubsub_v1.SubscriberClient`. + +.. code-block:: python + + >>> from google.cloud import pubsub + >>> publisher = pubsub.PublisherClient() + >>> subscriber = pubsub.SubscriberClient() + +or pass in ``credentials`` explicitly. + +.. code-block:: python + + >>> from google.cloud import pubsub + >>> client = pubsub.PublisherClient( + ... credentials=creds, + ... ) + +********** +Publishing +********** + +To publish data to Cloud Pub/Sub you must create a topic, and then publish +messages to it: + +.. code-block:: python + + >>> import os + >>> from google.cloud import pubsub + >>> + >>> publisher = pubsub.PublisherClient() + >>> topic = 'projects/{project_id}/topics/{topic}'.format( + ... project_id=os.getenv('GOOGLE_CLOUD_PROJECT'), + ... topic='MY_TOPIC_NAME', # Set this to something appropriate. + ... ) + >>> publisher.create_topic(topic) + >>> publisher.publish(topic, b'My first message!', spam='eggs') + +To learn more, consult the :doc:`publishing documentation <publisher/index>`. + + +*********** +Subscribing +*********** + +To subscribe to data in Cloud Pub/Sub, you create a subscription based on +the topic, and subscribe to that. + +.. code-block:: python + + >>> import os + >>> from google.cloud import pubsub + >>> + >>> subscriber = pubsub.SubscriberClient() + >>> topic = 'projects/{project_id}/topics/{topic}'.format( + ... project_id=os.getenv('GOOGLE_CLOUD_PROJECT'), + ... topic='MY_TOPIC_NAME', # Set this to something appropriate. + ... ) + >>> subscription_name = 'projects/{project_id}/subscriptions/{sub}'.format( + ... project_id=os.getenv('GOOGLE_CLOUD_PROJECT'), + ... 
sub='MY_SUBSCRIPTION_NAME', # Set this to something appropriate. + ... ) + >>> subscription = subscriber.create_subscription(topic, subscription_name) + +The subscription is opened asynchronously, and messages are processed by +use of a callback. + +.. code-block:: python + + >>> def callback(message): + ... print(message.data) + ... message.ack() + >>> subscription.open(callback) + +To learn more, consult the :doc:`subscriber documentation <subscriber/index>`. + + +********** +Learn More +********** + +.. toctree:: + :maxdepth: 3 + + publisher/index + subscriber/index + types diff --git a/docs/pubsub/message.rst b/docs/pubsub/message.rst deleted file mode 100644 index 654c607d46b3..000000000000 --- a/docs/pubsub/message.rst +++ /dev/null @@ -1,6 +0,0 @@ -Message -~~~~~~~ - -.. automodule:: google.cloud.pubsub.message - :members: - :show-inheritance: diff --git a/docs/pubsub/publisher/api/batch.rst b/docs/pubsub/publisher/api/batch.rst new file mode 100644 index 000000000000..5846d3ff9416 --- /dev/null +++ b/docs/pubsub/publisher/api/batch.rst @@ -0,0 +1,8 @@ +:orphan: + +Batch API +========= + +.. automodule:: google.cloud.pubsub_v1.publisher.batch.thread + :members: + :inherited-members: diff --git a/docs/pubsub/publisher/api/client.rst b/docs/pubsub/publisher/api/client.rst new file mode 100644 index 000000000000..47a3aa3d5d7a --- /dev/null +++ b/docs/pubsub/publisher/api/client.rst @@ -0,0 +1,6 @@ +Publisher Client API +==================== + +.. automodule:: google.cloud.pubsub_v1.publisher.client + :members: + :inherited-members: diff --git a/docs/pubsub/publisher/index.rst b/docs/pubsub/publisher/index.rst new file mode 100644 index 000000000000..72b374b588a3 --- /dev/null +++ b/docs/pubsub/publisher/index.rst @@ -0,0 +1,126 @@ +Publishing Messages +=================== + +Publishing messages is handled through the +:class:`~.pubsub_v1.publisher.client.Client` class (aliased as +``google.cloud.pubsub.PublisherClient``). 
This class provides methods to +create topics, and (most importantly) a +:meth:`~.pubsub_v1.publisher.client.Client.publish` method that publishes +messages to Pub/Sub. + +Instantiating a publishing client is straightforward: + +.. code-block:: python + + from google.cloud import pubsub + publish_client = pubsub.PublisherClient() + + +Publish a Message +----------------- + +To publish a message, use the +:meth:`~.pubsub_v1.publisher.client.Client.publish` method. This method accepts +two positional arguments: the topic to publish to, and the body of the message. +It also accepts arbitrary keyword arguments, which are passed along as +attributes of the message. + +The topic is passed along as a string; all topics have the canonical form of +``projects/{project_name}/topics/{topic_name}``. + +Therefore, a very basic publishing call looks like: + +.. code-block:: python + + topic = 'projects/{project}/topics/{topic}' + publish_client.publish(topic, b'This is my message.') + +.. note:: + + The message data in Pub/Sub is an opaque blob of bytes, and as such, you + *must* send a ``bytes`` object in Python 3 (``str`` object in Python 2). + If you send a text string (``str`` in Python 3, ``unicode`` in Python 2), + the method will raise :exc:`TypeError`. + + The reason it works this way is because there is no reasonable guarantee + that the same language or environment is being used by the subscriber, + and so it is the responsibility of the publisher to properly encode + the payload. + +If you want to include attributes, simply add keyword arguments: + +.. code-block:: python + + topic = 'projects/{project}/topics/{topic}' + publish_client.publish(topic, b'This is my message.', foo='bar') + + +Batching +-------- + +Whenever you publish a message, a +:class:`~.pubsub_v1.publisher.batch.thread.Batch` is automatically created. +This way, if you publish a large volume of messages, it reduces the number of +requests made to the server. 
+ +The way that this works is that on the first message that you send, a new +:class:`~.pubsub_v1.publisher.batch.thread.Batch` is created automatically. +For every subsequent message, if there is already a valid batch that is still +accepting messages, then that batch is used. When the batch is created, it +begins a countdown that publishes the batch once sufficient time has +elapsed (by default, this is 0.05 seconds). + +If you need different batching settings, simply provide a +:class:`~.pubsub_v1.types.BatchSettings` object when you instantiate the +:class:`~.pubsub_v1.publisher.client.Client`: + +.. code-block:: python + + from google.cloud import pubsub + from google.cloud.pubsub import types + + client = pubsub.PublisherClient( + batch_settings=types.BatchSettings(max_messages=500), + ) + +Pub/Sub accepts a maximum of 1,000 messages in a batch, and the size of a +batch can not exceed 10 megabytes. + + +Futures +------- + +Every call to :meth:`~.pubsub_v1.publisher.client.Client.publish` will return +a class that conforms to the :class:`~concurrent.futures.Future` interface. +You can use this to ensure that the publish succeeded: + +.. code-block:: python + + # The .result() method will block until the future is complete. + # If there is an error, it will raise an exception. + future = client.publish(topic, b'My awesome message.') + message_id = future.result() + +You can also attach a callback to the future: + +.. code-block:: python + + # Callbacks receive the future as their only argument, as defined in + # the Future interface. + def callback(future): + message_id = future.result() + do_something_with(message_id) + + # The callback is added once you get the future. If you add a callback + # and the future is already done, it will simply be executed immediately. + future = client.publish(topic, b'My awesome message.') + future.add_done_callback(callback) + + +API Reference +------------- + +.. 
toctree:: + :maxdepth: 2 + + api/client diff --git a/docs/pubsub/snippets.py b/docs/pubsub/snippets.py deleted file mode 100644 index 96eea175c0cd..000000000000 --- a/docs/pubsub/snippets.py +++ /dev/null @@ -1,483 +0,0 @@ -# Copyright 2016 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Testable usage examples for Google Cloud Pubsub API wrapper - -Each example function takes a ``client`` argument (which must be an instance -of :class:`google.cloud.pubsub.client.Client`) and uses it to perform a task -with the API. - -To facilitate running the examples as system tests, each example is also passed -a ``to_delete`` list; the function adds to the list any objects created which -need to be deleted during teardown. 
-""" - -import time - -from google.cloud.pubsub.client import Client - - -def snippet(func): - """Mark ``func`` as a snippet example function.""" - func._snippet = True - return func - - -def _millis(): - return time.time() * 1000 - - -@snippet -def client_list_topics(client, to_delete): # pylint: disable=unused-argument - """List topics for a project.""" - - def do_something_with(sub): # pylint: disable=unused-argument - pass - - # [START client_list_topics] - for topic in client.list_topics(): # API request(s) - do_something_with(topic) - # [END client_list_topics] - - -@snippet -def client_list_subscriptions(client, - to_delete): # pylint: disable=unused-argument - """List all subscriptions for a project.""" - - def do_something_with(sub): # pylint: disable=unused-argument - pass - - # [START client_list_subscriptions] - for subscription in client.list_subscriptions(): # API request(s) - do_something_with(subscription) - # [END client_list_subscriptions] - - -@snippet -def client_topic(client, to_delete): # pylint: disable=unused-argument - """Topic factory.""" - TOPIC_NAME = 'topic_factory-%d' % (_millis(),) - - # [START client_topic] - topic = client.topic(TOPIC_NAME) - # [END client_topic] - - -@snippet -def client_subscription(client, to_delete): # pylint: disable=unused-argument - """Subscription factory.""" - SUBSCRIPTION_NAME = 'subscription_factory-%d' % (_millis(),) - - # [START client_subscription] - subscription = client.subscription( - SUBSCRIPTION_NAME, ack_deadline=60, - retain_acked_messages=True) - # [END client_subscription] - - -@snippet -def topic_create(client, to_delete): - """Create a topic.""" - TOPIC_NAME = 'topic_create-%d' % (_millis(),) - - # [START topic_create] - topic = client.topic(TOPIC_NAME) - topic.create() # API request - # [END topic_create] - - to_delete.append(topic) - - -@snippet -def topic_exists(client, to_delete): - """Test existence of a topic.""" - TOPIC_NAME = 'topic_exists-%d' % (_millis(),) - topic = 
client.topic(TOPIC_NAME) - to_delete.append(topic) - - # [START topic_exists] - assert not topic.exists() # API request - topic.create() # API request - assert topic.exists() # API request - # [END topic_exists] - - -@snippet -def topic_delete(client, to_delete): # pylint: disable=unused-argument - """Delete a topic.""" - TOPIC_NAME = 'topic_delete-%d' % (_millis(),) - topic = client.topic(TOPIC_NAME) - topic.create() # API request - - # [START topic_delete] - assert topic.exists() # API request - topic.delete() - assert not topic.exists() # API request - # [END topic_delete] - - -@snippet -def topic_iam_policy(client, to_delete): - """Fetch / set a topic's IAM policy.""" - TOPIC_NAME = 'topic_iam_policy-%d' % (_millis(),) - topic = client.topic(TOPIC_NAME) - topic.create() - to_delete.append(topic) - - # [START topic_get_iam_policy] - policy = topic.get_iam_policy() # API request - # [END topic_get_iam_policy] - - assert len(policy.viewers) == 0 - assert len(policy.editors) == 0 - assert len(policy.owners) == 0 - - # [START topic_set_iam_policy] - ALL_USERS = policy.all_users() - policy.viewers = [ALL_USERS] - LOGS_GROUP = policy.group('cloud-logs@google.com') - policy.editors = [LOGS_GROUP] - new_policy = topic.set_iam_policy(policy) # API request - # [END topic_set_iam_policy] - - assert ALL_USERS in new_policy.viewers - assert LOGS_GROUP in new_policy.editors - - -# @snippet # Disabled due to #1687 -def topic_check_iam_permissions(client, to_delete): - """Check topic IAM permissions.""" - TOPIC_NAME = 'topic_check_iam_permissions-%d' % (_millis(),) - topic = client.topic(TOPIC_NAME) - topic.create() - to_delete.append(topic) - - # [START topic_check_iam_permissions] - from google.cloud.pubsub.iam import OWNER_ROLE, EDITOR_ROLE, VIEWER_ROLE - TO_CHECK = [OWNER_ROLE, EDITOR_ROLE, VIEWER_ROLE] - ALLOWED = topic.check_iam_permissions(TO_CHECK) - assert set(ALLOWED) == set(TO_CHECK) - # [END topic_check_iam_permissions] - - -@snippet -def 
topic_publish_messages(client, to_delete): - """Publish messages to a topic.""" - TOPIC_NAME = 'topic_publish_messages-%d' % (_millis(),) - topic = client.topic(TOPIC_NAME) - topic.create() - to_delete.append(topic) - - # [START topic_publish_simple_message] - topic.publish(b'This is the message payload') # API request - # [END topic_publish_simple_message] - - # [START topic_publish_message_with_attrs] - topic.publish(b'Another message payload', extra='EXTRA') # API request - # [END topic_publish_message_with_attrs] - - -@snippet -def topic_subscription(client, to_delete): - """Create subscriptions to a topic.""" - TOPIC_NAME = 'topic_subscription-%d' % (_millis(),) - SUB_DEFAULTS = 'topic_subscription-defaults-%d' % (_millis(),) - SUB_ACK90 = 'topic_subscription-ack90-%d' % (_millis(),) - topic = client.topic(TOPIC_NAME) - topic.create() - to_delete.append(topic) - - # [START topic_subscription_defaults] - sub_defaults = topic.subscription(SUB_DEFAULTS) - # [END topic_subscription_defaults] - - sub_defaults.create() # API request - to_delete.append(sub_defaults) - expected_names = set() - expected_names.add(sub_defaults.full_name) - - # [START topic_subscription_ack90] - sub_ack90 = topic.subscription(SUB_ACK90, ack_deadline=90) - # [END topic_subscription_ack90] - - sub_ack90.create() # API request - to_delete.append(sub_ack90) - expected_names.add(sub_ack90.full_name) - - sub_names = set() - - def do_something_with(sub): - sub_names.add(sub.full_name) - - # [START topic_list_subscriptions] - for subscription in topic.list_subscriptions(): # API request(s) - do_something_with(subscription) - # [END topic_list_subscriptions] - - assert sub_names.issuperset(expected_names) - - -# @snippet: disabled, because push-mode requires a validated endpoint URL -def topic_subscription_push(client, to_delete): - """Create subscriptions to a topic.""" - TOPIC_NAME = 'topic_subscription_push-%d' % (_millis(),) - SUB_PUSH = 'topic_subscription_push-sub-%d' % (_millis(),) - 
PUSH_URL = 'https://api.example.com/push-endpoint' - topic = client.topic(TOPIC_NAME) - topic.create() - to_delete.append(topic) - - # [START topic_subscription_push] - subscription = topic.subscription(SUB_PUSH, push_endpoint=PUSH_URL) - subscription.create() # API request - # [END topic_subscription_push] - - # [START subscription_push_pull] - subscription.modify_push_configuration(push_endpoint=None) # API request - # [END subscription_push_pull] - - # [START subscription_pull_push] - subscription.modify_push_configuration( - push_endpoint=PUSH_URL) # API request - # [END subscription_pull_push] - - -@snippet -def subscription_lifecycle(client, to_delete): - """Test lifecycle of a subscription.""" - TOPIC_NAME = 'subscription_lifecycle-%d' % (_millis(),) - SUB_NAME = 'subscription_lifecycle-defaults-%d' % (_millis(),) - topic = client.topic(TOPIC_NAME) - topic.create() - to_delete.append(topic) - - # [START subscription_create] - subscription = topic.subscription(SUB_NAME) - subscription.create() # API request - # [END subscription_create] - - # [START subscription_exists] - assert subscription.exists() # API request - # [END subscription_exists] - - # [START subscription_reload] - subscription.reload() # API request - # [END subscription_reload] - - # [START subscription_delete] - subscription.delete() # API request - # [END subscription_delete] - - -@snippet -def subscription_pull(client, to_delete): - """Pull messges from a subscribed topic.""" - TOPIC_NAME = 'subscription_pull-%d' % (_millis(),) - SUB_NAME = 'subscription_pull-defaults-%d' % (_millis(),) - PAYLOAD1 = b'PAYLOAD1' - PAYLOAD2 = b'PAYLOAD2' - EXTRA = 'EXTRA' - topic = client.topic(TOPIC_NAME) - topic.create() - to_delete.append(topic) - - subscription = topic.subscription(SUB_NAME) - subscription.create() - to_delete.append(subscription) - - # [START subscription_pull_return_immediately] - pulled = subscription.pull(return_immediately=True) - # [END subscription_pull_return_immediately] - assert 
len(pulled) == 0, "unexpected message" - - topic.publish(PAYLOAD1) - topic.publish(PAYLOAD2, extra=EXTRA) - - time.sleep(1) # eventually-consistent - - # [START subscription_pull] - pulled = subscription.pull(max_messages=2) - # [END subscription_pull] - - assert len(pulled) == 2, "eventual consistency" - - # [START subscription_modify_ack_deadline] - for ack_id, _ in pulled: - subscription.modify_ack_deadline(ack_id, 90) # API request - # [END subscription_modify_ack_deadline] - - payloads = [] - extras = [] - - def do_something_with(message): # pylint: disable=unused-argument - payloads.append(message.data) - if message.attributes: - extras.append(message.attributes) - - class ApplicationException(Exception): - pass - - def log_exception(_): - pass - - # [START subscription_acknowledge] - for ack_id, message in pulled: - try: - do_something_with(message) - except ApplicationException as e: - log_exception(e) - else: - subscription.acknowledge([ack_id]) - # [END subscription_acknowledge] - - assert set(payloads) == set([PAYLOAD1, PAYLOAD2]), 'payloads: %s' % ( - (payloads,)) - assert extras == [{'extra': EXTRA}], 'extras: %s' % ( - (extras,)) - - -@snippet -def subscription_pull_w_autoack(client, to_delete): - """Pull messges from a topic, auto-acknowldging them""" - TOPIC_NAME = 'subscription_pull_autoack-%d' % (_millis(),) - SUB_NAME = 'subscription_pull_autoack-defaults-%d' % (_millis(),) - PAYLOAD1 = b'PAYLOAD1' - PAYLOAD2 = b'PAYLOAD2' - EXTRA = 'EXTRA' - topic = client.topic(TOPIC_NAME) - topic.create() - to_delete.append(topic) - - subscription = topic.subscription(SUB_NAME) - subscription.create() - to_delete.append(subscription) - - # [START topic_batch] - with topic.batch() as batch: - batch.publish(PAYLOAD1) - batch.publish(PAYLOAD2, extra=EXTRA) - # [END topic_batch] - - time.sleep(1) # eventually-consistent - - payloads = [] - extras = [] - - def do_something_with(message): # pylint: disable=unused-argument - payloads.append(message.data) - if 
message.attributes: - extras.append(message.attributes) - - # [START subscription_pull_autoack] - from google.cloud.pubsub.subscription import AutoAck - with AutoAck(subscription, max_messages=10) as ack: - for ack_id, message in list(ack.items()): - try: - do_something_with(message) - except Exception: # pylint: disable=broad-except - del ack[ack_id] - # [END subscription_pull_autoack] - - assert set(payloads) == set(PAYLOAD1, PAYLOAD1), "eventual consistency" - assert extras == [{'extra': EXTRA}], "eventual consistency" - - -@snippet -def subscription_iam_policy(client, to_delete): - """Fetch / set a subscription's IAM policy.""" - TOPIC_NAME = 'subscription_iam_policy-%d' % (_millis(),) - SUB_NAME = 'subscription_iam_policy-defaults-%d' % (_millis(),) - topic = client.topic(TOPIC_NAME) - topic.create() - to_delete.append(topic) - - subscription = topic.subscription(SUB_NAME) - subscription.create() - to_delete.append(subscription) - - # [START subscription_get_iam_policy] - policy = subscription.get_iam_policy() # API request - # [END subscription_get_iam_policy] - - assert len(policy.viewers) == 0 - assert len(policy.editors) == 0 - assert len(policy.owners) == 0 - - # [START subscription_set_iam_policy] - ALL_USERS = policy.all_users() - policy.viewers = [ALL_USERS] - LOGS_GROUP = policy.group('cloud-logs@google.com') - policy.editors = [LOGS_GROUP] - new_policy = subscription.set_iam_policy(policy) # API request - # [END subscription_set_iam_policy] - - assert ALL_USERS in new_policy.viewers - assert LOGS_GROUP in new_policy.editors - - -# @snippet # Disabled due to #1687 -def subscription_check_iam_permissions(client, to_delete): - """Check subscription IAM permissions.""" - TOPIC_NAME = 'subscription_check_iam_permissions-%d' % (_millis(),) - SUB_NAME = 'subscription_check_iam_permissions-defaults-%d' % (_millis(),) - topic = client.topic(TOPIC_NAME) - topic.create() - to_delete.append(topic) - - subscription = topic.subscription(SUB_NAME) - 
subscription.create() - to_delete.append(subscription) - - # [START subscription_check_iam_permissions] - from google.cloud.pubsub.iam import OWNER_ROLE, EDITOR_ROLE, VIEWER_ROLE - TO_CHECK = [OWNER_ROLE, EDITOR_ROLE, VIEWER_ROLE] - ALLOWED = subscription.check_iam_permissions(TO_CHECK) - assert set(ALLOWED) == set(TO_CHECK) - # [END subscription_check_iam_permissions] - - -def _line_no(func): - code = getattr(func, '__code__', None) or getattr(func, 'func_code') - return code.co_firstlineno - - -def _find_examples(): - funcs = [obj for obj in globals().values() - if getattr(obj, '_snippet', False)] - for func in sorted(funcs, key=_line_no): - yield func - - -def _name_and_doc(func): - return func.__name__, func.__doc__ - - -def main(): - client = Client() - for example in _find_examples(): - to_delete = [] - print('%-25s: %s' % _name_and_doc(example)) - try: - example(client, to_delete) - except AssertionError as e: - print(' FAIL: %s' % (e,)) - except Exception as e: # pylint: disable=broad-except - print(' ERROR: %r' % (e,)) - for item in to_delete: - item.delete() - - -if __name__ == '__main__': - main() diff --git a/docs/pubsub/subscriber/api/client.rst b/docs/pubsub/subscriber/api/client.rst new file mode 100644 index 000000000000..965880c5a640 --- /dev/null +++ b/docs/pubsub/subscriber/api/client.rst @@ -0,0 +1,6 @@ +Subscriber Client API +===================== + +.. automodule:: google.cloud.pubsub_v1.subscriber.client + :members: + :inherited-members: diff --git a/docs/pubsub/subscriber/api/message.rst b/docs/pubsub/subscriber/api/message.rst new file mode 100644 index 000000000000..d6566f4c363e --- /dev/null +++ b/docs/pubsub/subscriber/api/message.rst @@ -0,0 +1,5 @@ +Messages +======== + +.. 
autoclass:: google.cloud.pubsub_v1.subscriber.message.Message + :members: ack, attributes, data, nack, publish_time diff --git a/docs/pubsub/subscriber/api/policy.rst b/docs/pubsub/subscriber/api/policy.rst new file mode 100644 index 000000000000..95d288d0b974 --- /dev/null +++ b/docs/pubsub/subscriber/api/policy.rst @@ -0,0 +1,5 @@ +Subscriptions +============= + +.. autoclass:: google.cloud.pubsub_v1.subscriber.policy.thread.Policy + :members: open, close diff --git a/docs/pubsub/subscriber/index.rst b/docs/pubsub/subscriber/index.rst new file mode 100644 index 000000000000..be32a9e9ed97 --- /dev/null +++ b/docs/pubsub/subscriber/index.rst @@ -0,0 +1,123 @@ +Subscribing to Messages +======================= + +Subscribing to messages is handled through the +:class:`~.pubsub_v1.subscriber.client.Client` class (aliased as +``google.cloud.pubsub.SubscriberClient``). This class provides a +:meth:`~.pubsub_v1.subscriber.client.Client.subscribe` method to +attach to subscriptions on existing topics, and (most importantly) a +:meth:`~.pubsub_v1.subscriber.policy.thread.Policy.open` method that +consumes messages from Pub/Sub. + +Instantiating a subscriber client is straightforward: + +.. code-block:: python + + from google.cloud import pubsub + subscriber = pubsub.SubscriberClient() + + +Creating a Subscription +----------------------- + +In Pub/Sub, a **subscription** is a discrete pull of messages from a topic. +If multiple clients pull the same subscription, then messages are split +between them. If multiple clients create a subscription each, then each client +will get every message. + +.. note:: + + Remember that Pub/Sub operates under the principle of "everything at least + once". Even in the case where multiple clients pull the same subscription, + *some* redundancy is likely. + +Creating a subscription requires that you already know what topic you want +to subscribe to, and it must already exist. Once you have that, it is easy: + +.. 
code-block:: python + + # Substitute {project}, {topic}, and {subscription} with appropriate + # values for your application. + topic_name = 'projects/{project}/topics/{topic}' + sub_name = 'projects/{project}/subscriptions/{subscription}' + subscriber.create_subscription(topic_name, sub_name) + + +Pulling a Subscription +---------------------- + +Once you have created a subscription (or if you already had one), the next +step is to pull data from it. This entails two steps: first you must call +:meth:`~.pubsub_v1.subscriber.client.Client.subscribe`, passing in the +subscription string. + +.. code-block:: python + + # As before, substitute {project} and {subscription} with appropriate + # values for your application. + subscription = subscriber.subscribe( + 'projects/{project}/subscriptions/{subscription}', + ) + +This will return an object with an +:meth:`~.pubsub_v1.subscriber.policy.thread.Policy.open` method; calling +this method will actually begin consumption of the subscription. + + +Subscription Callbacks +---------------------- + +Because subscriptions in this Pub/Sub client are opened asynchronously, +processing the messages that are yielded by the subscription is handled +through **callbacks**. + +The basic idea: Define a function that takes one argument; this argument +will be a :class:`~.pubsub_v1.subscriber.message.Message` instance. This +function should do whatever processing is necessary. At the end, the +function should :meth:`~.pubsub_v1.subscriber.message.Message.ack` the +message. + +When you call :meth:`~.pubsub_v1.subscriber.policy.thread.Policy.open`, you +must pass the callback that will be used. + +Here is an example: + +.. code-block:: python + + # Define the callback. + # Note that the callback is defined *before* the subscription is opened. + def callback(message): + do_something_with(message) # Replace this with your actual logic. + message.ack() + + # Open the subscription, passing the callback. 
+ subscription.open(callback) + +Explaining Ack +-------------- + +In Pub/Sub, the term **ack** stands for "acknowledge". You should ack a +message when your processing of that message *has completed*. When you ack +a message, you are telling Pub/Sub that you do not need to see it again. + +It might be tempting to ack messages immediately on receipt. While there +are valid use cases for this, in general it is unwise. The reason why: If +there is some error or edge case in your processing logic, and processing +of the message fails, you will have already told Pub/Sub that you successfully +processed the message. By contrast, if you ack only upon completion, then +Pub/Sub will eventually re-deliver the unacknowledged message. + +It is also possible to **nack** a message, which is the opposite. When you +nack, it tells Pub/Sub that you are unable or unwilling to deal with the +message, and that the service should redeliver it. + + +API Reference +------------- + +.. toctree:: + :maxdepth: 2 + + api/client + api/policy + api/message diff --git a/docs/pubsub/subscription.rst b/docs/pubsub/subscription.rst deleted file mode 100644 index f242cb644e83..000000000000 --- a/docs/pubsub/subscription.rst +++ /dev/null @@ -1,7 +0,0 @@ -Subscriptions -~~~~~~~~~~~~~ - -.. automodule:: google.cloud.pubsub.subscription - :members: - :member-order: bysource - :show-inheritance: diff --git a/docs/pubsub/topic.rst b/docs/pubsub/topic.rst deleted file mode 100644 index 323d467a08ce..000000000000 --- a/docs/pubsub/topic.rst +++ /dev/null @@ -1,7 +0,0 @@ -Topics -~~~~~~ - -.. automodule:: google.cloud.pubsub.topic - :members: - :member-order: bysource - :show-inheritance: diff --git a/docs/pubsub/types.rst b/docs/pubsub/types.rst new file mode 100644 index 000000000000..87c987571766 --- /dev/null +++ b/docs/pubsub/types.rst @@ -0,0 +1,5 @@ +Pub/Sub Client Types +==================== + +.. 
automodule:: google.cloud.pubsub_v1.types + :members: diff --git a/docs/pubsub/usage.rst b/docs/pubsub/usage.rst deleted file mode 100644 index 96727e654835..000000000000 --- a/docs/pubsub/usage.rst +++ /dev/null @@ -1,245 +0,0 @@ -Pub / Sub -========= - - -.. toctree:: - :maxdepth: 2 - :hidden: - - client - topic - subscription - message - iam - -Authentication / Configuration ------------------------------- - -- Use :class:`Client ` objects to configure - your applications. - -- In addition to any authentication configuration, you should also set the - :envvar:`GOOGLE_CLOUD_PROJECT` environment variable for the project you'd like - to interact with. If you are Google App Engine or Google Compute Engine - this will be detected automatically. - -- The library now enables the ``gRPC`` transport for the pubsub API by - default, assuming that the required dependencies are installed and - importable. To *disable* this transport, set the - :envvar:`GOOGLE_CLOUD_DISABLE_GRPC` environment variable to a - non-empty string, e.g.: ``$ export GOOGLE_CLOUD_DISABLE_GRPC=true``. - -- :class:`Client ` objects hold both a ``project`` - and an authenticated connection to the PubSub service. - -- The authentication credentials can be implicitly determined from the - environment or directly via - :meth:`from_service_account_json ` - and - :meth:`from_service_account_p12 `. - -- After setting ``GOOGLE_APPLICATION_CREDENTIALS`` and ``GOOGLE_CLOUD_PROJECT`` - environment variables, create a :class:`Client ` - - .. code-block:: python - - >>> from google.cloud import pubsub - >>> client = pubsub.Client() - - -Manage topics for a project ---------------------------- - -List topics for the default project: - -.. literalinclude:: snippets.py - :start-after: [START client_list_topics] - :end-before: [END client_list_topics] - -Create a new topic for the default project: - -.. 
literalinclude:: snippets.py - :start-after: [START topic_create] - :end-before: [END topic_create] - -Check for the existence of a topic: - -.. literalinclude:: snippets.py - :start-after: [START topic_exists] - :end-before: [END topic_exists] - -Delete a topic: - -.. literalinclude:: snippets.py - :start-after: [START topic_delete] - :end-before: [END topic_delete] - -Fetch the IAM policy for a topic: - -.. literalinclude:: snippets.py - :start-after: [START topic_get_iam_policy] - :end-before: [END topic_get_iam_policy] - -Update the IAM policy for a topic: - -.. literalinclude:: snippets.py - :start-after: [START topic_set_iam_policy] - :end-before: [END topic_set_iam_policy] - -Test permissions allowed by the current IAM policy on a topic: - -.. literalinclude:: snippets.py - :start-after: [START topic_check_iam_permissions] - :end-before: [END topic_check_iam_permissions] - - -Publish messages to a topic ---------------------------- - -Publish a single message to a topic, without attributes: - -.. literalinclude:: snippets.py - :start-after: [START topic_publish_simple_message] - :end-before: [END topic_publish_simple_message] - -Publish a single message to a topic, with attributes: - -.. literalinclude:: snippets.py - :start-after: [START topic_publish_message_with_attrs] - :end-before: [END topic_publish_message_with_attrs] - -Publish a set of messages to a topic (as a single request): - -.. literalinclude:: snippets.py - :start-after: [START topic_batch] - :end-before: [END topic_batch] - -.. note:: - - The only API request happens during the ``__exit__()`` of the topic - used as a context manager, and only if the block exits without raising - an exception. - - -Manage subscriptions to topics ------------------------------- - -List all subscriptions for the default project: - -.. literalinclude:: snippets.py - :start-after: [START client_list_subscriptions] - :end-before: [END client_list_subscriptions] - -List subscriptions for a topic: - -.. 
literalinclude:: snippets.py - :start-after: [START topic_list_subscriptions] - :end-before: [END topic_list_subscriptions] - -Create a new pull subscription for a topic, with defaults: - -.. literalinclude:: snippets.py - :start-after: [START topic_subscription_defaults] - :end-before: [END topic_subscription_defaults] - -Create a new pull subscription for a topic with a non-default ACK deadline: - -.. literalinclude:: snippets.py - :start-after: [START topic_subscription_ack90] - :end-before: [END topic_subscription_ack90] - -Create a new push subscription for a topic: - -.. literalinclude:: snippets.py - :start-after: [START topic_subscription_push] - :end-before: [END topic_subscription_push] - -Check for the existence of a subscription: - -.. literalinclude:: snippets.py - :start-after: [START subscription_exists] - :end-before: [END subscription_exists] - -Convert a pull subscription to push: - -.. literalinclude:: snippets.py - :start-after: [START subscription_pull_push] - :end-before: [END subscription_pull_push] - -Convert a push subscription to pull: - -.. literalinclude:: snippets.py - :start-after: [START subscription_push_pull] - :end-before: [END subscription_push_pull] - -Re-synchronize a subscription with the back-end: - -.. literalinclude:: snippets.py - :start-after: [START subscription_reload] - :end-before: [END subscription_reload] - -Fetch the IAM policy for a subscription - -.. literalinclude:: snippets.py - :start-after: [START subscription_get_iam_policy] - :end-before: [END subscription_get_iam_policy] - -Update the IAM policy for a subscription: - -.. literalinclude:: snippets.py - :start-after: [START subscription_set_iam_policy] - :end-before: [END subscription_set_iam_policy] - -Test permissions allowed by the current IAM policy on a subscription: - -.. literalinclude:: snippets.py - :start-after: [START subscription_check_iam_permissions] - :end-before: [END subscription_check_iam_permissions] - -Delete a subscription: - -.. 
literalinclude:: snippets.py - :start-after: [START subscription_delete] - :end-before: [END subscription_delete] - - -Pull messages from a subscription ---------------------------------- - -Fetch pending messages for a pull subscription: - -.. literalinclude:: snippets.py - :start-after: [START subscription_pull] - :end-before: [END subscription_pull] - -Note that received messages must be acknowledged, or else the back-end -will re-send them later: - -.. literalinclude:: snippets.py - :start-after: [START subscription_acknowledge] - :end-before: [END subscription_acknowledge] - -Fetch messages for a pull subscription without blocking (none pending): - -.. literalinclude:: snippets.py - :start-after: [START subscription_pull_return_immediately] - :end-before: [END subscription_pull_return_immediately] - -Update the acknowlegement deadline for pulled messages: - -.. literalinclude:: snippets.py - :start-after: [START subscription_modify_ack_deadline] - :end-before: [END subscription_modify_ack_deadline] - -Fetch pending messages, acknowledging those whose processing doesn't raise an -error: - -.. literalinclude:: snippets.py - :start-after: [START subscription_pull_autoack] - :end-before: [END subscription_pull_autoack] - -.. 
note:: - - The ``pull`` API request occurs at entry to the ``with`` block, and the - ``acknowlege`` API request occurs at the end, passing only the ``ack_ids`` - which haven't been deleted from ``ack`` diff --git a/pubsub/.coveragerc b/pubsub/.coveragerc index a54b99aa14b7..41ca7428e2ee 100644 --- a/pubsub/.coveragerc +++ b/pubsub/.coveragerc @@ -1,11 +1,17 @@ [run] branch = True +source = + google.cloud.pubsub + google.cloud.pubsub_v1 + tests.unit [report] -fail_under = 100 show_missing = True + exclude_lines = # Re-enable the standard pragma pragma: NO COVER # Ignore debug-only repr def __repr__ + # Ignore abstract methods + raise NotImplementedError diff --git a/pubsub/google/cloud/gapic/pubsub/v1/subscriber_client.py b/pubsub/google/cloud/gapic/pubsub/v1/subscriber_client.py index ab8233824595..5313e0d941a1 100644 --- a/pubsub/google/cloud/gapic/pubsub/v1/subscriber_client.py +++ b/pubsub/google/cloud/gapic/pubsub/v1/subscriber_client.py @@ -861,16 +861,14 @@ def create_snapshot(self, name, subscription, options=None): Format is ``projects/{project}/snapshots/{snap}``. subscription (string): The subscription whose backlog the snapshot retains. Specifically, the created snapshot is guaranteed to retain: - (a) The existing backlog on the subscription. More precisely, this is - :: + - The existing backlog on the subscription. More precisely, this is defined as the messages in the subscription's backlog that are unacknowledged upon the successful completion of the `CreateSnapshot` request; as well as: - (b) Any messages published to the subscription's topic following the - :: - + - Any messages published to the subscription's topic following the successful completion of the CreateSnapshot request. + Format is ``projects/{project}/subscriptions/{sub}``. options (:class:`google.gax.CallOptions`): Overrides the default settings for this call, e.g, timeout, retries etc. 
diff --git a/pubsub/google/cloud/gapic/pubsub/v1/subscriber_client_config.json b/pubsub/google/cloud/gapic/pubsub/v1/subscriber_client_config.json index 4b31158fbac8..6180cc0a941f 100644 --- a/pubsub/google/cloud/gapic/pubsub/v1/subscriber_client_config.json +++ b/pubsub/google/cloud/gapic/pubsub/v1/subscriber_client_config.json @@ -35,6 +35,15 @@ "rpc_timeout_multiplier": 1.0, "max_rpc_timeout_millis": 12000, "total_timeout_millis": 600000 + }, + "streaming": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 900000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 900000, + "total_timeout_millis": 900000 } }, "methods": { @@ -79,9 +88,9 @@ "retry_params_name": "messaging" }, "StreamingPull": { - "timeout_millis": 60000, + "timeout_millis": 900000, "retry_codes_name": "pull", - "retry_params_name": "messaging" + "retry_params_name": "streaming" }, "ModifyPushConfig": { "timeout_millis": 60000, diff --git a/pubsub/google/cloud/proto/pubsub/v1/pubsub_pb2.py b/pubsub/google/cloud/proto/pubsub/v1/pubsub_pb2.py index 07919f8c5646..aeee99e182d0 100644 --- a/pubsub/google/cloud/proto/pubsub/v1/pubsub_pb2.py +++ b/pubsub/google/cloud/proto/pubsub/v1/pubsub_pb2.py @@ -24,7 +24,7 @@ name='google/cloud/proto/pubsub/v1/pubsub.proto', package='google.pubsub.v1', syntax='proto3', - serialized_pb=_b('\n)google/cloud/proto/pubsub/v1/pubsub.proto\x12\x10google.pubsub.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\x15\n\x05Topic\x12\x0c\n\x04name\x18\x01 \x01(\t\"\xdb\x01\n\rPubsubMessage\x12\x0c\n\x04\x64\x61ta\x18\x01 \x01(\x0c\x12\x43\n\nattributes\x18\x02 \x03(\x0b\x32/.google.pubsub.v1.PubsubMessage.AttributesEntry\x12\x12\n\nmessage_id\x18\x03 \x01(\t\x12\x30\n\x0cpublish_time\x18\x04 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp\x1a\x31\n\x0f\x41ttributesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\" \n\x0fGetTopicRequest\x12\r\n\x05topic\x18\x01 \x01(\t\"R\n\x0ePublishRequest\x12\r\n\x05topic\x18\x01 \x01(\t\x12\x31\n\x08messages\x18\x02 \x03(\x0b\x32\x1f.google.pubsub.v1.PubsubMessage\"&\n\x0fPublishResponse\x12\x13\n\x0bmessage_ids\x18\x01 \x03(\t\"K\n\x11ListTopicsRequest\x12\x0f\n\x07project\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t\"V\n\x12ListTopicsResponse\x12\'\n\x06topics\x18\x01 \x03(\x0b\x32\x17.google.pubsub.v1.Topic\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"U\n\x1dListTopicSubscriptionsRequest\x12\r\n\x05topic\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t\"P\n\x1eListTopicSubscriptionsResponse\x12\x15\n\rsubscriptions\x18\x01 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"#\n\x12\x44\x65leteTopicRequest\x12\r\n\x05topic\x18\x01 \x01(\t\"\xda\x01\n\x0cSubscription\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05topic\x18\x02 \x01(\t\x12\x31\n\x0bpush_config\x18\x04 \x01(\x0b\x32\x1c.google.pubsub.v1.PushConfig\x12\x1c\n\x14\x61\x63k_deadline_seconds\x18\x05 \x01(\x05\x12\x1d\n\x15retain_acked_messages\x18\x07 \x01(\x08\x12=\n\x1amessage_retention_duration\x18\x08 \x01(\x0b\x32\x19.google.protobuf.Duration\"\x98\x01\n\nPushConfig\x12\x15\n\rpush_endpoint\x18\x01 \x01(\t\x12@\n\nattributes\x18\x02 \x03(\x0b\x32,.google.pubsub.v1.PushConfig.AttributesEntry\x1a\x31\n\x0f\x41ttributesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"S\n\x0fReceivedMessage\x12\x0e\n\x06\x61\x63k_id\x18\x01 \x01(\t\x12\x30\n\x07message\x18\x02 \x01(\x0b\x32\x1f.google.pubsub.v1.PubsubMessage\".\n\x16GetSubscriptionRequest\x12\x14\n\x0csubscription\x18\x01 \x01(\t\"\x82\x01\n\x19UpdateSubscriptionRequest\x12\x34\n\x0csubscription\x18\x01 
\x01(\x0b\x32\x1e.google.pubsub.v1.Subscription\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\"R\n\x18ListSubscriptionsRequest\x12\x0f\n\x07project\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t\"k\n\x19ListSubscriptionsResponse\x12\x35\n\rsubscriptions\x18\x01 \x03(\x0b\x32\x1e.google.pubsub.v1.Subscription\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"1\n\x19\x44\x65leteSubscriptionRequest\x12\x14\n\x0csubscription\x18\x01 \x01(\t\"b\n\x17ModifyPushConfigRequest\x12\x14\n\x0csubscription\x18\x01 \x01(\t\x12\x31\n\x0bpush_config\x18\x02 \x01(\x0b\x32\x1c.google.pubsub.v1.PushConfig\"U\n\x0bPullRequest\x12\x14\n\x0csubscription\x18\x01 \x01(\t\x12\x1a\n\x12return_immediately\x18\x02 \x01(\x08\x12\x14\n\x0cmax_messages\x18\x03 \x01(\x05\"L\n\x0cPullResponse\x12<\n\x11received_messages\x18\x01 \x03(\x0b\x32!.google.pubsub.v1.ReceivedMessage\"_\n\x18ModifyAckDeadlineRequest\x12\x14\n\x0csubscription\x18\x01 \x01(\t\x12\x0f\n\x07\x61\x63k_ids\x18\x04 \x03(\t\x12\x1c\n\x14\x61\x63k_deadline_seconds\x18\x03 \x01(\x05\";\n\x12\x41\x63knowledgeRequest\x12\x14\n\x0csubscription\x18\x01 \x01(\t\x12\x0f\n\x07\x61\x63k_ids\x18\x02 \x03(\t\"\xa4\x01\n\x14StreamingPullRequest\x12\x14\n\x0csubscription\x18\x01 \x01(\t\x12\x0f\n\x07\x61\x63k_ids\x18\x02 \x03(\t\x12\x1f\n\x17modify_deadline_seconds\x18\x03 \x03(\x05\x12\x1f\n\x17modify_deadline_ack_ids\x18\x04 \x03(\t\x12#\n\x1bstream_ack_deadline_seconds\x18\x05 \x01(\x05\"U\n\x15StreamingPullResponse\x12<\n\x11received_messages\x18\x01 \x03(\x0b\x32!.google.pubsub.v1.ReceivedMessage\";\n\x15\x43reateSnapshotRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0csubscription\x18\x02 \x01(\t\"X\n\x08Snapshot\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05topic\x18\x02 \x01(\t\x12/\n\x0b\x65xpire_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"N\n\x14ListSnapshotsRequest\x12\x0f\n\x07project\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x02 
\x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t\"_\n\x15ListSnapshotsResponse\x12-\n\tsnapshots\x18\x01 \x03(\x0b\x32\x1a.google.pubsub.v1.Snapshot\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\")\n\x15\x44\x65leteSnapshotRequest\x12\x10\n\x08snapshot\x18\x01 \x01(\t\"m\n\x0bSeekRequest\x12\x14\n\x0csubscription\x18\x01 \x01(\t\x12*\n\x04time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x12\x12\n\x08snapshot\x18\x03 \x01(\tH\x00\x42\x08\n\x06target\"\x0e\n\x0cSeekResponse2\xe8\x0f\n\nSubscriber\x12\x86\x01\n\x12\x43reateSubscription\x12\x1e.google.pubsub.v1.Subscription\x1a\x1e.google.pubsub.v1.Subscription\"0\x82\xd3\xe4\x93\x02*\x1a%/v1/{name=projects/*/subscriptions/*}:\x01*\x12\x92\x01\n\x0fGetSubscription\x12(.google.pubsub.v1.GetSubscriptionRequest\x1a\x1e.google.pubsub.v1.Subscription\"5\x82\xd3\xe4\x93\x02/\x12-/v1/{subscription=projects/*/subscriptions/*}\x12\xa0\x01\n\x12UpdateSubscription\x12+.google.pubsub.v1.UpdateSubscriptionRequest\x1a\x1e.google.pubsub.v1.Subscription\"=\x82\xd3\xe4\x93\x02\x37\x32\x32/v1/{subscription.name=projects/*/subscriptions/*}:\x01*\x12\x9c\x01\n\x11ListSubscriptions\x12*.google.pubsub.v1.ListSubscriptionsRequest\x1a+.google.pubsub.v1.ListSubscriptionsResponse\".\x82\xd3\xe4\x93\x02(\x12&/v1/{project=projects/*}/subscriptions\x12\x90\x01\n\x12\x44\x65leteSubscription\x12+.google.pubsub.v1.DeleteSubscriptionRequest\x1a\x16.google.protobuf.Empty\"5\x82\xd3\xe4\x93\x02/*-/v1/{subscription=projects/*/subscriptions/*}\x12\xa3\x01\n\x11ModifyAckDeadline\x12*.google.pubsub.v1.ModifyAckDeadlineRequest\x1a\x16.google.protobuf.Empty\"J\x82\xd3\xe4\x93\x02\x44\"?/v1/{subscription=projects/*/subscriptions/*}:modifyAckDeadline:\x01*\x12\x91\x01\n\x0b\x41\x63knowledge\x12$.google.pubsub.v1.AcknowledgeRequest\x1a\x16.google.protobuf.Empty\"D\x82\xd3\xe4\x93\x02>\"9/v1/{subscription=projects/*/subscriptions/*}:acknowledge:\x01*\x12\x84\x01\n\x04Pull\x12\x1d.google.pubsub.v1.PullRequest\x1a\x1e.google.pubsub.v1.PullResponse\"=\x82\
xd3\xe4\x93\x02\x37\"2/v1/{subscription=projects/*/subscriptions/*}:pull:\x01*\x12\x64\n\rStreamingPull\x12&.google.pubsub.v1.StreamingPullRequest\x1a\'.google.pubsub.v1.StreamingPullResponse(\x01\x30\x01\x12\xa0\x01\n\x10ModifyPushConfig\x12).google.pubsub.v1.ModifyPushConfigRequest\x1a\x16.google.protobuf.Empty\"I\x82\xd3\xe4\x93\x02\x43\">/v1/{subscription=projects/*/subscriptions/*}:modifyPushConfig:\x01*\x12\x8c\x01\n\rListSnapshots\x12&.google.pubsub.v1.ListSnapshotsRequest\x1a\'.google.pubsub.v1.ListSnapshotsResponse\"*\x82\xd3\xe4\x93\x02$\x12\"/v1/{project=projects/*}/snapshots\x12\x83\x01\n\x0e\x43reateSnapshot\x12\'.google.pubsub.v1.CreateSnapshotRequest\x1a\x1a.google.pubsub.v1.Snapshot\",\x82\xd3\xe4\x93\x02&\x1a!/v1/{name=projects/*/snapshots/*}:\x01*\x12\x80\x01\n\x0e\x44\x65leteSnapshot\x12\'.google.pubsub.v1.DeleteSnapshotRequest\x1a\x16.google.protobuf.Empty\"-\x82\xd3\xe4\x93\x02\'*%/v1/{snapshot=projects/*/snapshots/*}\x12\x84\x01\n\x04Seek\x12\x1d.google.pubsub.v1.SeekRequest\x1a\x1e.google.pubsub.v1.SeekResponse\"=\x82\xd3\xe4\x93\x02\x37\"2/v1/{subscription=projects/*/subscriptions/*}:seek:\x01*2\x9b\x06\n\tPublisher\x12j\n\x0b\x43reateTopic\x12\x17.google.pubsub.v1.Topic\x1a\x17.google.pubsub.v1.Topic\")\x82\xd3\xe4\x93\x02#\x1a\x1e/v1/{name=projects/*/topics/*}:\x01*\x12\x82\x01\n\x07Publish\x12 
.google.pubsub.v1.PublishRequest\x1a!.google.pubsub.v1.PublishResponse\"2\x82\xd3\xe4\x93\x02,\"\'/v1/{topic=projects/*/topics/*}:publish:\x01*\x12o\n\x08GetTopic\x12!.google.pubsub.v1.GetTopicRequest\x1a\x17.google.pubsub.v1.Topic\"\'\x82\xd3\xe4\x93\x02!\x12\x1f/v1/{topic=projects/*/topics/*}\x12\x80\x01\n\nListTopics\x12#.google.pubsub.v1.ListTopicsRequest\x1a$.google.pubsub.v1.ListTopicsResponse\"\'\x82\xd3\xe4\x93\x02!\x12\x1f/v1/{project=projects/*}/topics\x12\xb2\x01\n\x16ListTopicSubscriptions\x12/.google.pubsub.v1.ListTopicSubscriptionsRequest\x1a\x30.google.pubsub.v1.ListTopicSubscriptionsResponse\"5\x82\xd3\xe4\x93\x02/\x12-/v1/{topic=projects/*/topics/*}/subscriptions\x12t\n\x0b\x44\x65leteTopic\x12$.google.pubsub.v1.DeleteTopicRequest\x1a\x16.google.protobuf.Empty\"\'\x82\xd3\xe4\x93\x02!*\x1f/v1/{topic=projects/*/topics/*}By\n\x14\x63om.google.pubsub.v1B\x0bPubsubProtoP\x01Z6google.golang.org/genproto/googleapis/pubsub/v1;pubsub\xf8\x01\x01\xaa\x02\x16Google.Cloud.PubSub.V1b\x06proto3') + serialized_pb=_b('\n)google/cloud/proto/pubsub/v1/pubsub.proto\x12\x10google.pubsub.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"y\n\x05Topic\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x33\n\x06labels\x18\x02 \x03(\x0b\x32#.google.pubsub.v1.Topic.LabelsEntry\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xdb\x01\n\rPubsubMessage\x12\x0c\n\x04\x64\x61ta\x18\x01 \x01(\x0c\x12\x43\n\nattributes\x18\x02 \x03(\x0b\x32/.google.pubsub.v1.PubsubMessage.AttributesEntry\x12\x12\n\nmessage_id\x18\x03 \x01(\t\x12\x30\n\x0cpublish_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x1a\x31\n\x0f\x41ttributesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\" \n\x0fGetTopicRequest\x12\r\n\x05topic\x18\x01 
\x01(\t\"m\n\x12UpdateTopicRequest\x12&\n\x05topic\x18\x01 \x01(\x0b\x32\x17.google.pubsub.v1.Topic\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\"R\n\x0ePublishRequest\x12\r\n\x05topic\x18\x01 \x01(\t\x12\x31\n\x08messages\x18\x02 \x03(\x0b\x32\x1f.google.pubsub.v1.PubsubMessage\"&\n\x0fPublishResponse\x12\x13\n\x0bmessage_ids\x18\x01 \x03(\t\"K\n\x11ListTopicsRequest\x12\x0f\n\x07project\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t\"V\n\x12ListTopicsResponse\x12\'\n\x06topics\x18\x01 \x03(\x0b\x32\x17.google.pubsub.v1.Topic\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"U\n\x1dListTopicSubscriptionsRequest\x12\r\n\x05topic\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t\"P\n\x1eListTopicSubscriptionsResponse\x12\x15\n\rsubscriptions\x18\x01 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"#\n\x12\x44\x65leteTopicRequest\x12\r\n\x05topic\x18\x01 \x01(\t\"\xc5\x02\n\x0cSubscription\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05topic\x18\x02 \x01(\t\x12\x31\n\x0bpush_config\x18\x04 \x01(\x0b\x32\x1c.google.pubsub.v1.PushConfig\x12\x1c\n\x14\x61\x63k_deadline_seconds\x18\x05 \x01(\x05\x12\x1d\n\x15retain_acked_messages\x18\x07 \x01(\x08\x12=\n\x1amessage_retention_duration\x18\x08 \x01(\x0b\x32\x19.google.protobuf.Duration\x12:\n\x06labels\x18\t \x03(\x0b\x32*.google.pubsub.v1.Subscription.LabelsEntry\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x98\x01\n\nPushConfig\x12\x15\n\rpush_endpoint\x18\x01 \x01(\t\x12@\n\nattributes\x18\x02 \x03(\x0b\x32,.google.pubsub.v1.PushConfig.AttributesEntry\x1a\x31\n\x0f\x41ttributesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"S\n\x0fReceivedMessage\x12\x0e\n\x06\x61\x63k_id\x18\x01 \x01(\t\x12\x30\n\x07message\x18\x02 
\x01(\x0b\x32\x1f.google.pubsub.v1.PubsubMessage\".\n\x16GetSubscriptionRequest\x12\x14\n\x0csubscription\x18\x01 \x01(\t\"\x82\x01\n\x19UpdateSubscriptionRequest\x12\x34\n\x0csubscription\x18\x01 \x01(\x0b\x32\x1e.google.pubsub.v1.Subscription\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\"R\n\x18ListSubscriptionsRequest\x12\x0f\n\x07project\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t\"k\n\x19ListSubscriptionsResponse\x12\x35\n\rsubscriptions\x18\x01 \x03(\x0b\x32\x1e.google.pubsub.v1.Subscription\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"1\n\x19\x44\x65leteSubscriptionRequest\x12\x14\n\x0csubscription\x18\x01 \x01(\t\"b\n\x17ModifyPushConfigRequest\x12\x14\n\x0csubscription\x18\x01 \x01(\t\x12\x31\n\x0bpush_config\x18\x02 \x01(\x0b\x32\x1c.google.pubsub.v1.PushConfig\"U\n\x0bPullRequest\x12\x14\n\x0csubscription\x18\x01 \x01(\t\x12\x1a\n\x12return_immediately\x18\x02 \x01(\x08\x12\x14\n\x0cmax_messages\x18\x03 \x01(\x05\"L\n\x0cPullResponse\x12<\n\x11received_messages\x18\x01 \x03(\x0b\x32!.google.pubsub.v1.ReceivedMessage\"_\n\x18ModifyAckDeadlineRequest\x12\x14\n\x0csubscription\x18\x01 \x01(\t\x12\x0f\n\x07\x61\x63k_ids\x18\x04 \x03(\t\x12\x1c\n\x14\x61\x63k_deadline_seconds\x18\x03 \x01(\x05\";\n\x12\x41\x63knowledgeRequest\x12\x14\n\x0csubscription\x18\x01 \x01(\t\x12\x0f\n\x07\x61\x63k_ids\x18\x02 \x03(\t\"\xa4\x01\n\x14StreamingPullRequest\x12\x14\n\x0csubscription\x18\x01 \x01(\t\x12\x0f\n\x07\x61\x63k_ids\x18\x02 \x03(\t\x12\x1f\n\x17modify_deadline_seconds\x18\x03 \x03(\x05\x12\x1f\n\x17modify_deadline_ack_ids\x18\x04 \x03(\t\x12#\n\x1bstream_ack_deadline_seconds\x18\x05 \x01(\x05\"U\n\x15StreamingPullResponse\x12<\n\x11received_messages\x18\x01 \x03(\x0b\x32!.google.pubsub.v1.ReceivedMessage\";\n\x15\x43reateSnapshotRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0csubscription\x18\x02 \x01(\t\"v\n\x15UpdateSnapshotRequest\x12,\n\x08snapshot\x18\x01 
\x01(\x0b\x32\x1a.google.pubsub.v1.Snapshot\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\"\xbf\x01\n\x08Snapshot\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05topic\x18\x02 \x01(\t\x12/\n\x0b\x65xpire_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x36\n\x06labels\x18\x04 \x03(\x0b\x32&.google.pubsub.v1.Snapshot.LabelsEntry\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"N\n\x14ListSnapshotsRequest\x12\x0f\n\x07project\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t\"_\n\x15ListSnapshotsResponse\x12-\n\tsnapshots\x18\x01 \x03(\x0b\x32\x1a.google.pubsub.v1.Snapshot\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\")\n\x15\x44\x65leteSnapshotRequest\x12\x10\n\x08snapshot\x18\x01 \x01(\t\"m\n\x0bSeekRequest\x12\x14\n\x0csubscription\x18\x01 \x01(\t\x12*\n\x04time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x12\x12\n\x08snapshot\x18\x03 \x01(\tH\x00\x42\x08\n\x06target\"\x0e\n\x0cSeekResponse2\xf7\x10\n\nSubscriber\x12\x86\x01\n\x12\x43reateSubscription\x12\x1e.google.pubsub.v1.Subscription\x1a\x1e.google.pubsub.v1.Subscription\"0\x82\xd3\xe4\x93\x02*\x1a%/v1/{name=projects/*/subscriptions/*}:\x01*\x12\x92\x01\n\x0fGetSubscription\x12(.google.pubsub.v1.GetSubscriptionRequest\x1a\x1e.google.pubsub.v1.Subscription\"5\x82\xd3\xe4\x93\x02/\x12-/v1/{subscription=projects/*/subscriptions/*}\x12\xa0\x01\n\x12UpdateSubscription\x12+.google.pubsub.v1.UpdateSubscriptionRequest\x1a\x1e.google.pubsub.v1.Subscription\"=\x82\xd3\xe4\x93\x02\x37\x32\x32/v1/{subscription.name=projects/*/subscriptions/*}:\x01*\x12\x9c\x01\n\x11ListSubscriptions\x12*.google.pubsub.v1.ListSubscriptionsRequest\x1a+.google.pubsub.v1.ListSubscriptionsResponse\".\x82\xd3\xe4\x93\x02(\x12&/v1/{project=projects/*}/subscriptions\x12\x90\x01\n\x12\x44\x65leteSubscription\x12+.google.pubsub.v1.DeleteSubscriptionRequest\x1a\x16.google.protobuf.Empty\"5\x82\xd3\xe4\x93\
x02/*-/v1/{subscription=projects/*/subscriptions/*}\x12\xa3\x01\n\x11ModifyAckDeadline\x12*.google.pubsub.v1.ModifyAckDeadlineRequest\x1a\x16.google.protobuf.Empty\"J\x82\xd3\xe4\x93\x02\x44\"?/v1/{subscription=projects/*/subscriptions/*}:modifyAckDeadline:\x01*\x12\x91\x01\n\x0b\x41\x63knowledge\x12$.google.pubsub.v1.AcknowledgeRequest\x1a\x16.google.protobuf.Empty\"D\x82\xd3\xe4\x93\x02>\"9/v1/{subscription=projects/*/subscriptions/*}:acknowledge:\x01*\x12\x84\x01\n\x04Pull\x12\x1d.google.pubsub.v1.PullRequest\x1a\x1e.google.pubsub.v1.PullResponse\"=\x82\xd3\xe4\x93\x02\x37\"2/v1/{subscription=projects/*/subscriptions/*}:pull:\x01*\x12\x64\n\rStreamingPull\x12&.google.pubsub.v1.StreamingPullRequest\x1a\'.google.pubsub.v1.StreamingPullResponse(\x01\x30\x01\x12\xa0\x01\n\x10ModifyPushConfig\x12).google.pubsub.v1.ModifyPushConfigRequest\x1a\x16.google.protobuf.Empty\"I\x82\xd3\xe4\x93\x02\x43\">/v1/{subscription=projects/*/subscriptions/*}:modifyPushConfig:\x01*\x12\x8c\x01\n\rListSnapshots\x12&.google.pubsub.v1.ListSnapshotsRequest\x1a\'.google.pubsub.v1.ListSnapshotsResponse\"*\x82\xd3\xe4\x93\x02$\x12\"/v1/{project=projects/*}/snapshots\x12\x83\x01\n\x0e\x43reateSnapshot\x12\'.google.pubsub.v1.CreateSnapshotRequest\x1a\x1a.google.pubsub.v1.Snapshot\",\x82\xd3\xe4\x93\x02&\x1a!/v1/{name=projects/*/snapshots/*}:\x01*\x12\x8c\x01\n\x0eUpdateSnapshot\x12\'.google.pubsub.v1.UpdateSnapshotRequest\x1a\x1a.google.pubsub.v1.Snapshot\"5\x82\xd3\xe4\x93\x02/2*/v1/{snapshot.name=projects/*/snapshots/*}:\x01*\x12\x80\x01\n\x0e\x44\x65leteSnapshot\x12\'.google.pubsub.v1.DeleteSnapshotRequest\x1a\x16.google.protobuf.Empty\"-\x82\xd3\xe4\x93\x02\'*%/v1/{snapshot=projects/*/snapshots/*}\x12\x84\x01\n\x04Seek\x12\x1d.google.pubsub.v1.SeekRequest\x1a\x1e.google.pubsub.v1.SeekResponse\"=\x82\xd3\xe4\x93\x02\x37\"2/v1/{subscription=projects/*/subscriptions/*}:seek:\x01*2\x9a\x07\n\tPublisher\x12j\n\x0b\x43reateTopic\x12\x17.google.pubsub.v1.Topic\x1a\x17.google.pubsub.v1.Topic\")\x82\
xd3\xe4\x93\x02#\x1a\x1e/v1/{name=projects/*/topics/*}:\x01*\x12}\n\x0bUpdateTopic\x12$.google.pubsub.v1.UpdateTopicRequest\x1a\x17.google.pubsub.v1.Topic\"/\x82\xd3\xe4\x93\x02)2$/v1/{topic.name=projects/*/topics/*}:\x01*\x12\x82\x01\n\x07Publish\x12 .google.pubsub.v1.PublishRequest\x1a!.google.pubsub.v1.PublishResponse\"2\x82\xd3\xe4\x93\x02,\"\'/v1/{topic=projects/*/topics/*}:publish:\x01*\x12o\n\x08GetTopic\x12!.google.pubsub.v1.GetTopicRequest\x1a\x17.google.pubsub.v1.Topic\"\'\x82\xd3\xe4\x93\x02!\x12\x1f/v1/{topic=projects/*/topics/*}\x12\x80\x01\n\nListTopics\x12#.google.pubsub.v1.ListTopicsRequest\x1a$.google.pubsub.v1.ListTopicsResponse\"\'\x82\xd3\xe4\x93\x02!\x12\x1f/v1/{project=projects/*}/topics\x12\xb2\x01\n\x16ListTopicSubscriptions\x12/.google.pubsub.v1.ListTopicSubscriptionsRequest\x1a\x30.google.pubsub.v1.ListTopicSubscriptionsResponse\"5\x82\xd3\xe4\x93\x02/\x12-/v1/{topic=projects/*/topics/*}/subscriptions\x12t\n\x0b\x44\x65leteTopic\x12$.google.pubsub.v1.DeleteTopicRequest\x1a\x16.google.protobuf.Empty\"\'\x82\xd3\xe4\x93\x02!*\x1f/v1/{topic=projects/*/topics/*}By\n\x14\x63om.google.pubsub.v1B\x0bPubsubProtoP\x01Z6google.golang.org/genproto/googleapis/pubsub/v1;pubsub\xf8\x01\x01\xaa\x02\x16Google.Cloud.PubSub.V1b\x06proto3') , dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,]) _sym_db.RegisterFileDescriptor(DESCRIPTOR) @@ -32,6 +32,43 @@ +_TOPIC_LABELSENTRY = _descriptor.Descriptor( + name='LabelsEntry', + full_name='google.pubsub.v1.Topic.LabelsEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='google.pubsub.v1.Topic.LabelsEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + 
message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='google.pubsub.v1.Topic.LabelsEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=297, + serialized_end=342, +) + _TOPIC = _descriptor.Descriptor( name='Topic', full_name='google.pubsub.v1.Topic', @@ -46,10 +83,17 @@ message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), + _descriptor.FieldDescriptor( + name='labels', full_name='google.pubsub.v1.Topic.labels', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), ], extensions=[ ], - nested_types=[], + nested_types=[_TOPIC_LABELSENTRY, ], enum_types=[ ], options=None, @@ -59,7 +103,7 @@ oneofs=[ ], serialized_start=221, - serialized_end=242, + serialized_end=342, ) @@ -96,8 +140,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=415, - serialized_end=464, + serialized_start=515, + serialized_end=564, ) _PUBSUBMESSAGE = _descriptor.Descriptor( @@ -147,8 +191,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=245, - serialized_end=464, + serialized_start=345, + serialized_end=564, ) @@ -178,8 +222,46 @@ extension_ranges=[], oneofs=[ ], - serialized_start=466, - serialized_end=498, + serialized_start=566, + serialized_end=598, +) + + +_UPDATETOPICREQUEST = _descriptor.Descriptor( + 
name='UpdateTopicRequest', + full_name='google.pubsub.v1.UpdateTopicRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='topic', full_name='google.pubsub.v1.UpdateTopicRequest.topic', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='update_mask', full_name='google.pubsub.v1.UpdateTopicRequest.update_mask', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=600, + serialized_end=709, ) @@ -216,8 +298,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=500, - serialized_end=582, + serialized_start=711, + serialized_end=793, ) @@ -247,8 +329,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=584, - serialized_end=622, + serialized_start=795, + serialized_end=833, ) @@ -292,8 +374,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=624, - serialized_end=699, + serialized_start=835, + serialized_end=910, ) @@ -330,8 +412,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=701, - serialized_end=787, + serialized_start=912, + serialized_end=998, ) @@ -375,8 +457,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=789, - serialized_end=874, + serialized_start=1000, + serialized_end=1085, ) @@ -413,8 +495,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=876, - serialized_end=956, + serialized_start=1087, + serialized_end=1167, ) @@ -444,11 +526,48 @@ extension_ranges=[], oneofs=[ ], - serialized_start=958, - serialized_end=993, 
+ serialized_start=1169, + serialized_end=1204, ) +_SUBSCRIPTION_LABELSENTRY = _descriptor.Descriptor( + name='LabelsEntry', + full_name='google.pubsub.v1.Subscription.LabelsEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='google.pubsub.v1.Subscription.LabelsEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='google.pubsub.v1.Subscription.LabelsEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=297, + serialized_end=342, +) + _SUBSCRIPTION = _descriptor.Descriptor( name='Subscription', full_name='google.pubsub.v1.Subscription', @@ -498,10 +617,17 @@ message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), + _descriptor.FieldDescriptor( + name='labels', full_name='google.pubsub.v1.Subscription.labels', index=6, + number=9, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), ], extensions=[ ], - nested_types=[], + nested_types=[_SUBSCRIPTION_LABELSENTRY, ], enum_types=[ ], options=None, @@ -510,8 +636,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=996, - serialized_end=1214, + 
serialized_start=1207, + serialized_end=1532, ) @@ -548,8 +674,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=415, - serialized_end=464, + serialized_start=515, + serialized_end=564, ) _PUSHCONFIG = _descriptor.Descriptor( @@ -585,8 +711,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1217, - serialized_end=1369, + serialized_start=1535, + serialized_end=1687, ) @@ -623,8 +749,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1371, - serialized_end=1454, + serialized_start=1689, + serialized_end=1772, ) @@ -654,8 +780,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1456, - serialized_end=1502, + serialized_start=1774, + serialized_end=1820, ) @@ -692,8 +818,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1505, - serialized_end=1635, + serialized_start=1823, + serialized_end=1953, ) @@ -737,8 +863,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1637, - serialized_end=1719, + serialized_start=1955, + serialized_end=2037, ) @@ -775,8 +901,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1721, - serialized_end=1828, + serialized_start=2039, + serialized_end=2146, ) @@ -806,8 +932,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1830, - serialized_end=1879, + serialized_start=2148, + serialized_end=2197, ) @@ -844,8 +970,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1881, - serialized_end=1979, + serialized_start=2199, + serialized_end=2297, ) @@ -889,8 +1015,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1981, - serialized_end=2066, + serialized_start=2299, + serialized_end=2384, ) @@ -920,8 +1046,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2068, - serialized_end=2144, + serialized_start=2386, + serialized_end=2462, ) @@ -965,8 +1091,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2146, - serialized_end=2241, + serialized_start=2464, + serialized_end=2559, ) @@ -1003,8 +1129,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2243, - serialized_end=2302, 
+ serialized_start=2561, + serialized_end=2620, ) @@ -1062,8 +1188,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2305, - serialized_end=2469, + serialized_start=2623, + serialized_end=2787, ) @@ -1093,8 +1219,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2471, - serialized_end=2556, + serialized_start=2789, + serialized_end=2874, ) @@ -1131,11 +1257,86 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2558, - serialized_end=2617, + serialized_start=2876, + serialized_end=2935, +) + + +_UPDATESNAPSHOTREQUEST = _descriptor.Descriptor( + name='UpdateSnapshotRequest', + full_name='google.pubsub.v1.UpdateSnapshotRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='snapshot', full_name='google.pubsub.v1.UpdateSnapshotRequest.snapshot', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='update_mask', full_name='google.pubsub.v1.UpdateSnapshotRequest.update_mask', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2937, + serialized_end=3055, ) +_SNAPSHOT_LABELSENTRY = _descriptor.Descriptor( + name='LabelsEntry', + full_name='google.pubsub.v1.Snapshot.LabelsEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='google.pubsub.v1.Snapshot.LabelsEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, 
default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='google.pubsub.v1.Snapshot.LabelsEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=297, + serialized_end=342, +) + _SNAPSHOT = _descriptor.Descriptor( name='Snapshot', full_name='google.pubsub.v1.Snapshot', @@ -1164,10 +1365,17 @@ message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), + _descriptor.FieldDescriptor( + name='labels', full_name='google.pubsub.v1.Snapshot.labels', index=3, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), ], extensions=[ ], - nested_types=[], + nested_types=[_SNAPSHOT_LABELSENTRY, ], enum_types=[ ], options=None, @@ -1176,8 +1384,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2619, - serialized_end=2707, + serialized_start=3058, + serialized_end=3249, ) @@ -1221,8 +1429,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2709, - serialized_end=2787, + serialized_start=3251, + serialized_end=3329, ) @@ -1259,8 +1467,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2789, - serialized_end=2884, + serialized_start=3331, + serialized_end=3426, ) @@ -1290,8 +1498,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2886, - serialized_end=2927, + 
serialized_start=3428, + serialized_end=3469, ) @@ -1338,8 +1546,8 @@ name='target', full_name='google.pubsub.v1.SeekRequest.target', index=0, containing_type=None, fields=[]), ], - serialized_start=2929, - serialized_end=3038, + serialized_start=3471, + serialized_end=3580, ) @@ -1362,17 +1570,23 @@ extension_ranges=[], oneofs=[ ], - serialized_start=3040, - serialized_end=3054, + serialized_start=3582, + serialized_end=3596, ) +_TOPIC_LABELSENTRY.containing_type = _TOPIC +_TOPIC.fields_by_name['labels'].message_type = _TOPIC_LABELSENTRY _PUBSUBMESSAGE_ATTRIBUTESENTRY.containing_type = _PUBSUBMESSAGE _PUBSUBMESSAGE.fields_by_name['attributes'].message_type = _PUBSUBMESSAGE_ATTRIBUTESENTRY _PUBSUBMESSAGE.fields_by_name['publish_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_UPDATETOPICREQUEST.fields_by_name['topic'].message_type = _TOPIC +_UPDATETOPICREQUEST.fields_by_name['update_mask'].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK _PUBLISHREQUEST.fields_by_name['messages'].message_type = _PUBSUBMESSAGE _LISTTOPICSRESPONSE.fields_by_name['topics'].message_type = _TOPIC +_SUBSCRIPTION_LABELSENTRY.containing_type = _SUBSCRIPTION _SUBSCRIPTION.fields_by_name['push_config'].message_type = _PUSHCONFIG _SUBSCRIPTION.fields_by_name['message_retention_duration'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION +_SUBSCRIPTION.fields_by_name['labels'].message_type = _SUBSCRIPTION_LABELSENTRY _PUSHCONFIG_ATTRIBUTESENTRY.containing_type = _PUSHCONFIG _PUSHCONFIG.fields_by_name['attributes'].message_type = _PUSHCONFIG_ATTRIBUTESENTRY _RECEIVEDMESSAGE.fields_by_name['message'].message_type = _PUBSUBMESSAGE @@ -1382,7 +1596,11 @@ _MODIFYPUSHCONFIGREQUEST.fields_by_name['push_config'].message_type = _PUSHCONFIG _PULLRESPONSE.fields_by_name['received_messages'].message_type = _RECEIVEDMESSAGE _STREAMINGPULLRESPONSE.fields_by_name['received_messages'].message_type = _RECEIVEDMESSAGE 
+_UPDATESNAPSHOTREQUEST.fields_by_name['snapshot'].message_type = _SNAPSHOT +_UPDATESNAPSHOTREQUEST.fields_by_name['update_mask'].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK +_SNAPSHOT_LABELSENTRY.containing_type = _SNAPSHOT _SNAPSHOT.fields_by_name['expire_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_SNAPSHOT.fields_by_name['labels'].message_type = _SNAPSHOT_LABELSENTRY _LISTSNAPSHOTSRESPONSE.fields_by_name['snapshots'].message_type = _SNAPSHOT _SEEKREQUEST.fields_by_name['time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP _SEEKREQUEST.oneofs_by_name['target'].fields.append( @@ -1394,6 +1612,7 @@ DESCRIPTOR.message_types_by_name['Topic'] = _TOPIC DESCRIPTOR.message_types_by_name['PubsubMessage'] = _PUBSUBMESSAGE DESCRIPTOR.message_types_by_name['GetTopicRequest'] = _GETTOPICREQUEST +DESCRIPTOR.message_types_by_name['UpdateTopicRequest'] = _UPDATETOPICREQUEST DESCRIPTOR.message_types_by_name['PublishRequest'] = _PUBLISHREQUEST DESCRIPTOR.message_types_by_name['PublishResponse'] = _PUBLISHRESPONSE DESCRIPTOR.message_types_by_name['ListTopicsRequest'] = _LISTTOPICSREQUEST @@ -1417,6 +1636,7 @@ DESCRIPTOR.message_types_by_name['StreamingPullRequest'] = _STREAMINGPULLREQUEST DESCRIPTOR.message_types_by_name['StreamingPullResponse'] = _STREAMINGPULLRESPONSE DESCRIPTOR.message_types_by_name['CreateSnapshotRequest'] = _CREATESNAPSHOTREQUEST +DESCRIPTOR.message_types_by_name['UpdateSnapshotRequest'] = _UPDATESNAPSHOTREQUEST DESCRIPTOR.message_types_by_name['Snapshot'] = _SNAPSHOT DESCRIPTOR.message_types_by_name['ListSnapshotsRequest'] = _LISTSNAPSHOTSREQUEST DESCRIPTOR.message_types_by_name['ListSnapshotsResponse'] = _LISTSNAPSHOTSRESPONSE @@ -1425,11 +1645,35 @@ DESCRIPTOR.message_types_by_name['SeekResponse'] = _SEEKRESPONSE Topic = _reflection.GeneratedProtocolMessageType('Topic', (_message.Message,), dict( + + LabelsEntry = _reflection.GeneratedProtocolMessageType('LabelsEntry', 
(_message.Message,), dict( + DESCRIPTOR = _TOPIC_LABELSENTRY, + __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + # @@protoc_insertion_point(class_scope:google.pubsub.v1.Topic.LabelsEntry) + )) + , DESCRIPTOR = _TOPIC, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """A topic resource. + + + Attributes: + name: + The name of the topic. It must have the format + ``"projects/{project}/topics/{topic}"``. ``{topic}`` must + start with a letter, and contain only letters (``[A-Za-z]``), + numbers (``[0-9]``), dashes (``-``), underscores (``_``), + periods (``.``), tildes (``~``), plus (``+``) or percent signs + (``%``). It must be between 3 and 255 characters in length, + and it must not start with ``"goog"``. + labels: + User labels. + """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.Topic) )) _sym_db.RegisterMessage(Topic) +_sym_db.RegisterMessage(Topic.LabelsEntry) PubsubMessage = _reflection.GeneratedProtocolMessageType('PubsubMessage', (_message.Message,), dict( @@ -1441,6 +1685,28 @@ , DESCRIPTOR = _PUBSUBMESSAGE, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """A message data and its attributes. The message payload must not be + empty; it must contain either a non-empty data field, or at least one + attribute. + + + Attributes: + data: + The message payload. + attributes: + Optional attributes for this message. + message_id: + ID of this message, assigned by the server when the message is + published. Guaranteed to be unique within the topic. This + value may be read by a subscriber that receives a + ``PubsubMessage`` via a ``Pull`` call or a push delivery. It + must not be populated by the publisher in a ``Publish`` call. + publish_time: + The time at which the message was published, populated by the + server when it receives the ``Publish`` call. It must not be + populated by the publisher in a ``Publish`` call. 
+ """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.PubsubMessage) )) _sym_db.RegisterMessage(PubsubMessage) @@ -1449,13 +1715,51 @@ GetTopicRequest = _reflection.GeneratedProtocolMessageType('GetTopicRequest', (_message.Message,), dict( DESCRIPTOR = _GETTOPICREQUEST, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Request for the GetTopic method. + + + Attributes: + topic: + The name of the topic to get. Format is + ``projects/{project}/topics/{topic}``. + """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.GetTopicRequest) )) _sym_db.RegisterMessage(GetTopicRequest) +UpdateTopicRequest = _reflection.GeneratedProtocolMessageType('UpdateTopicRequest', (_message.Message,), dict( + DESCRIPTOR = _UPDATETOPICREQUEST, + __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Request for the UpdateTopic method. + + + Attributes: + topic: + The topic to update. + update_mask: + Indicates which fields in the provided topic to update. Must + be specified and non-empty. + """, + # @@protoc_insertion_point(class_scope:google.pubsub.v1.UpdateTopicRequest) + )) +_sym_db.RegisterMessage(UpdateTopicRequest) + PublishRequest = _reflection.GeneratedProtocolMessageType('PublishRequest', (_message.Message,), dict( DESCRIPTOR = _PUBLISHREQUEST, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Request for the Publish method. + + + Attributes: + topic: + The messages in the request will be published on this topic. + Format is ``projects/{project}/topics/{topic}``. + messages: + The messages to publish. + """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.PublishRequest) )) _sym_db.RegisterMessage(PublishRequest) @@ -1463,6 +1767,16 @@ PublishResponse = _reflection.GeneratedProtocolMessageType('PublishResponse', (_message.Message,), dict( DESCRIPTOR = _PUBLISHRESPONSE, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Response for the ``Publish`` method. 
+ + + Attributes: + message_ids: + The server-assigned ID of each published message, in the same + order as the messages in the request. IDs are guaranteed to be + unique within the topic. + """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.PublishResponse) )) _sym_db.RegisterMessage(PublishResponse) @@ -1470,6 +1784,22 @@ ListTopicsRequest = _reflection.GeneratedProtocolMessageType('ListTopicsRequest', (_message.Message,), dict( DESCRIPTOR = _LISTTOPICSREQUEST, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Request for the ``ListTopics`` method. + + + Attributes: + project: + The name of the cloud project that topics belong to. Format is + ``projects/{project}``. + page_size: + Maximum number of topics to return. + page_token: + The value returned by the last ``ListTopicsResponse``; + indicates that this is a continuation of a prior + ``ListTopics`` call, and that the system should return the + next page of data. + """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.ListTopicsRequest) )) _sym_db.RegisterMessage(ListTopicsRequest) @@ -1477,6 +1807,18 @@ ListTopicsResponse = _reflection.GeneratedProtocolMessageType('ListTopicsResponse', (_message.Message,), dict( DESCRIPTOR = _LISTTOPICSRESPONSE, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Response for the ``ListTopics`` method. + + + Attributes: + topics: + The resulting topics. + next_page_token: + If not empty, indicates that there may be more topics that + match the request; this value should be passed in a new + ``ListTopicsRequest``. 
+ """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.ListTopicsResponse) )) _sym_db.RegisterMessage(ListTopicsResponse) @@ -1484,6 +1826,22 @@ ListTopicSubscriptionsRequest = _reflection.GeneratedProtocolMessageType('ListTopicSubscriptionsRequest', (_message.Message,), dict( DESCRIPTOR = _LISTTOPICSUBSCRIPTIONSREQUEST, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Request for the ``ListTopicSubscriptions`` method. + + + Attributes: + topic: + The name of the topic that subscriptions are attached to. + Format is ``projects/{project}/topics/{topic}``. + page_size: + Maximum number of subscription names to return. + page_token: + The value returned by the last + ``ListTopicSubscriptionsResponse``; indicates that this is a + continuation of a prior ``ListTopicSubscriptions`` call, and + that the system should return the next page of data. + """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.ListTopicSubscriptionsRequest) )) _sym_db.RegisterMessage(ListTopicSubscriptionsRequest) @@ -1491,6 +1849,18 @@ ListTopicSubscriptionsResponse = _reflection.GeneratedProtocolMessageType('ListTopicSubscriptionsResponse', (_message.Message,), dict( DESCRIPTOR = _LISTTOPICSUBSCRIPTIONSRESPONSE, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Response for the ``ListTopicSubscriptions`` method. + + + Attributes: + subscriptions: + The names of the subscriptions that match the request. + next_page_token: + If not empty, indicates that there may be more subscriptions + that match the request; this value should be passed in a new + ``ListTopicSubscriptionsRequest`` to get more subscriptions. 
+ """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.ListTopicSubscriptionsResponse) )) _sym_db.RegisterMessage(ListTopicSubscriptionsResponse) @@ -1498,16 +1868,88 @@ DeleteTopicRequest = _reflection.GeneratedProtocolMessageType('DeleteTopicRequest', (_message.Message,), dict( DESCRIPTOR = _DELETETOPICREQUEST, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Request for the ``DeleteTopic`` method. + + + Attributes: + topic: + Name of the topic to delete. Format is + ``projects/{project}/topics/{topic}``. + """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.DeleteTopicRequest) )) _sym_db.RegisterMessage(DeleteTopicRequest) Subscription = _reflection.GeneratedProtocolMessageType('Subscription', (_message.Message,), dict( + + LabelsEntry = _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), dict( + DESCRIPTOR = _SUBSCRIPTION_LABELSENTRY, + __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + # @@protoc_insertion_point(class_scope:google.pubsub.v1.Subscription.LabelsEntry) + )) + , DESCRIPTOR = _SUBSCRIPTION, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """A subscription resource. + + + Attributes: + name: + The name of the subscription. It must have the format + ``"projects/{project}/subscriptions/{subscription}"``. + ``{subscription}`` must start with a letter, and contain only + letters (``[A-Za-z]``), numbers (``[0-9]``), dashes (``-``), + underscores (``_``), periods (``.``), tildes (``~``), plus + (``+``) or percent signs (``%``). It must be between 3 and 255 + characters in length, and it must not start with ``"goog"``. + topic: + The name of the topic from which this subscription is + receiving messages. Format is + ``projects/{project}/topics/{topic}``. The value of this field + will be ``_deleted-topic_`` if the topic has been deleted. + push_config: + If push delivery is used with this subscription, this field is + used to configure it. 
An empty ``pushConfig`` signifies that + the subscriber will pull and ack messages using API methods. + ack_deadline_seconds: + This value is the maximum time after a subscriber receives a + message before the subscriber should acknowledge the message. + After message delivery but before the ack deadline expires and + before the message is acknowledged, it is an outstanding + message and will not be delivered again during that time (on a + best-effort basis). For pull subscriptions, this value is + used as the initial value for the ack deadline. To override + this value for a given message, call ``ModifyAckDeadline`` + with the corresponding ``ack_id`` if using pull. The minimum + custom deadline you can specify is 10 seconds. The maximum + custom deadline you can specify is 600 seconds (10 minutes). + If this parameter is 0, a default value of 10 seconds is used. + For push delivery, this value is also used to set the request + timeout for the call to the push endpoint. If the subscriber + never acknowledges the message, the Pub/Sub system will + eventually redeliver the message. + retain_acked_messages: + Indicates whether to retain acknowledged messages. If true, + then messages are not expunged from the subscription's + backlog, even if they are acknowledged, until they fall out of + the ``message_retention_duration`` window. + message_retention_duration: + How long to retain unacknowledged messages in the + subscription's backlog, from the moment a message is + published. If ``retain_acked_messages`` is true, then this + also configures the retention of acknowledged messages, and + thus configures how far back in time a ``Seek`` can be done. + Defaults to 7 days. Cannot be more than 7 days or less than 10 + minutes. + labels: + User labels. 
+ """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.Subscription) )) _sym_db.RegisterMessage(Subscription) +_sym_db.RegisterMessage(Subscription.LabelsEntry) PushConfig = _reflection.GeneratedProtocolMessageType('PushConfig', (_message.Message,), dict( @@ -1519,6 +1961,35 @@ , DESCRIPTOR = _PUSHCONFIG, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Configuration for a push delivery endpoint. + + + Attributes: + push_endpoint: + A URL locating the endpoint to which messages should be + pushed. For example, a Webhook endpoint might use + "https://example.com/push". + attributes: + Endpoint configuration attributes. Every endpoint has a set + of API supported attributes that can be used to control + different aspects of the message delivery. The currently + supported attribute is ``x-goog-version``, which you can use + to change the format of the pushed message. This attribute + indicates the version of the data expected by the endpoint. + This controls the shape of the pushed message (i.e., its + fields and metadata). The endpoint version is based on the + version of the Pub/Sub API. If not present during the + ``CreateSubscription`` call, it will default to the version of + the API used to make such call. If not present during a + ``ModifyPushConfig`` call, its value will not be changed. + ``GetSubscription`` calls will always return a valid version, + even if the subscription was created without this attribute. + The possible values for this attribute are: - ``v1beta1``: + uses the push format defined in the v1beta1 Pub/Sub API. - + ``v1`` or ``v1beta2``: uses the push format defined in the v1 + Pub/Sub API. 
+ """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.PushConfig) )) _sym_db.RegisterMessage(PushConfig) @@ -1527,6 +1998,16 @@ ReceivedMessage = _reflection.GeneratedProtocolMessageType('ReceivedMessage', (_message.Message,), dict( DESCRIPTOR = _RECEIVEDMESSAGE, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """A message and its corresponding acknowledgment ID. + + + Attributes: + ack_id: + This ID can be used to acknowledge the received message. + message: + The message. + """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.ReceivedMessage) )) _sym_db.RegisterMessage(ReceivedMessage) @@ -1534,6 +2015,15 @@ GetSubscriptionRequest = _reflection.GeneratedProtocolMessageType('GetSubscriptionRequest', (_message.Message,), dict( DESCRIPTOR = _GETSUBSCRIPTIONREQUEST, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Request for the GetSubscription method. + + + Attributes: + subscription: + The name of the subscription to get. Format is + ``projects/{project}/subscriptions/{sub}``. + """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.GetSubscriptionRequest) )) _sym_db.RegisterMessage(GetSubscriptionRequest) @@ -1541,6 +2031,17 @@ UpdateSubscriptionRequest = _reflection.GeneratedProtocolMessageType('UpdateSubscriptionRequest', (_message.Message,), dict( DESCRIPTOR = _UPDATESUBSCRIPTIONREQUEST, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Request for the UpdateSubscription method. + + + Attributes: + subscription: + The updated subscription object. + update_mask: + Indicates which fields in the provided subscription to update. + Must be specified and non-empty. 
+ """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.UpdateSubscriptionRequest) )) _sym_db.RegisterMessage(UpdateSubscriptionRequest) @@ -1548,6 +2049,22 @@ ListSubscriptionsRequest = _reflection.GeneratedProtocolMessageType('ListSubscriptionsRequest', (_message.Message,), dict( DESCRIPTOR = _LISTSUBSCRIPTIONSREQUEST, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Request for the ``ListSubscriptions`` method. + + + Attributes: + project: + The name of the cloud project that subscriptions belong to. + Format is ``projects/{project}``. + page_size: + Maximum number of subscriptions to return. + page_token: + The value returned by the last ``ListSubscriptionsResponse``; + indicates that this is a continuation of a prior + ``ListSubscriptions`` call, and that the system should return + the next page of data. + """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.ListSubscriptionsRequest) )) _sym_db.RegisterMessage(ListSubscriptionsRequest) @@ -1555,6 +2072,18 @@ ListSubscriptionsResponse = _reflection.GeneratedProtocolMessageType('ListSubscriptionsResponse', (_message.Message,), dict( DESCRIPTOR = _LISTSUBSCRIPTIONSRESPONSE, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Response for the ``ListSubscriptions`` method. + + + Attributes: + subscriptions: + The subscriptions that match the request. + next_page_token: + If not empty, indicates that there may be more subscriptions + that match the request; this value should be passed in a new + ``ListSubscriptionsRequest`` to get more subscriptions. 
+ """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.ListSubscriptionsResponse) )) _sym_db.RegisterMessage(ListSubscriptionsResponse) @@ -1562,6 +2091,15 @@ DeleteSubscriptionRequest = _reflection.GeneratedProtocolMessageType('DeleteSubscriptionRequest', (_message.Message,), dict( DESCRIPTOR = _DELETESUBSCRIPTIONREQUEST, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Request for the DeleteSubscription method. + + + Attributes: + subscription: + The subscription to delete. Format is + ``projects/{project}/subscriptions/{sub}``. + """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.DeleteSubscriptionRequest) )) _sym_db.RegisterMessage(DeleteSubscriptionRequest) @@ -1569,6 +2107,21 @@ ModifyPushConfigRequest = _reflection.GeneratedProtocolMessageType('ModifyPushConfigRequest', (_message.Message,), dict( DESCRIPTOR = _MODIFYPUSHCONFIGREQUEST, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Request for the ModifyPushConfig method. + + + Attributes: + subscription: + The name of the subscription. Format is + ``projects/{project}/subscriptions/{sub}``. + push_config: + The push configuration for future deliveries. An empty + ``pushConfig`` indicates that the Pub/Sub system should stop + pushing messages from the given subscription and allow + messages to be pulled and acknowledged - effectively pausing + the subscription if ``Pull`` is not called. + """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.ModifyPushConfigRequest) )) _sym_db.RegisterMessage(ModifyPushConfigRequest) @@ -1576,6 +2129,26 @@ PullRequest = _reflection.GeneratedProtocolMessageType('PullRequest', (_message.Message,), dict( DESCRIPTOR = _PULLREQUEST, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Request for the ``Pull`` method. + + + Attributes: + subscription: + The subscription from which messages should be pulled. Format + is ``projects/{project}/subscriptions/{sub}``. 
+ return_immediately: + If this field set to true, the system will respond immediately + even if it there are no messages available to return in the + ``Pull`` response. Otherwise, the system may wait (for a + bounded amount of time) until at least one message is + available, rather than returning no messages. The client may + cancel the request if it does not wish to wait any longer for + the response. + max_messages: + The maximum number of messages returned for this request. The + Pub/Sub system may return fewer than the number specified. + """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.PullRequest) )) _sym_db.RegisterMessage(PullRequest) @@ -1583,6 +2156,18 @@ PullResponse = _reflection.GeneratedProtocolMessageType('PullResponse', (_message.Message,), dict( DESCRIPTOR = _PULLRESPONSE, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Response for the ``Pull`` method. + + + Attributes: + received_messages: + Received Pub/Sub messages. The Pub/Sub system will return zero + messages if there are no more available in the backlog. The + Pub/Sub system may return fewer than the ``maxMessages`` + requested even if there are more messages available in the + backlog. + """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.PullResponse) )) _sym_db.RegisterMessage(PullResponse) @@ -1590,6 +2175,26 @@ ModifyAckDeadlineRequest = _reflection.GeneratedProtocolMessageType('ModifyAckDeadlineRequest', (_message.Message,), dict( DESCRIPTOR = _MODIFYACKDEADLINEREQUEST, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Request for the ModifyAckDeadline method. + + + Attributes: + subscription: + The name of the subscription. Format is + ``projects/{project}/subscriptions/{sub}``. + ack_ids: + List of acknowledgment IDs. + ack_deadline_seconds: + The new ack deadline with respect to the time this request was + sent to the Pub/Sub system. 
For example, if the value is 10, + the new ack deadline will expire 10 seconds after the + ``ModifyAckDeadline`` call was made. Specifying zero may + immediately make the message available for another pull + request. The minimum deadline you can specify is 0 seconds. + The maximum deadline you can specify is 600 seconds (10 + minutes). + """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.ModifyAckDeadlineRequest) )) _sym_db.RegisterMessage(ModifyAckDeadlineRequest) @@ -1597,6 +2202,19 @@ AcknowledgeRequest = _reflection.GeneratedProtocolMessageType('AcknowledgeRequest', (_message.Message,), dict( DESCRIPTOR = _ACKNOWLEDGEREQUEST, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Request for the Acknowledge method. + + + Attributes: + subscription: + The subscription whose message is being acknowledged. Format + is ``projects/{project}/subscriptions/{sub}``. + ack_ids: + The acknowledgment ID for the messages being acknowledged that + was returned by the Pub/Sub system in the ``Pull`` response. + Must not be empty. + """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.AcknowledgeRequest) )) _sym_db.RegisterMessage(AcknowledgeRequest) @@ -1604,6 +2222,55 @@ StreamingPullRequest = _reflection.GeneratedProtocolMessageType('StreamingPullRequest', (_message.Message,), dict( DESCRIPTOR = _STREAMINGPULLREQUEST, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Request for the ``StreamingPull`` streaming RPC method. This request is + used to establish the initial stream as well as to stream + acknowledgements and ack deadline modifications from the client to the + server. + + + Attributes: + subscription: + The subscription for which to initialize the new stream. This + must be provided in the first request on the stream, and must + not be set in subsequent requests from client to server. + Format is ``projects/{project}/subscriptions/{sub}``. 
+ ack_ids: + List of acknowledgement IDs for acknowledging previously + received messages (received on this stream or a different + stream). If an ack ID has expired, the corresponding message + may be redelivered later. Acknowledging a message more than + once will not result in an error. If the acknowledgement ID is + malformed, the stream will be aborted with status + ``INVALID_ARGUMENT``. + modify_deadline_seconds: + The list of new ack deadlines for the IDs listed in + ``modify_deadline_ack_ids``. The size of this list must be the + same as the size of ``modify_deadline_ack_ids``. If it differs + the stream will be aborted with ``INVALID_ARGUMENT``. Each + element in this list is applied to the element in the same + position in ``modify_deadline_ack_ids``. The new ack deadline + is with respect to the time this request was sent to the + Pub/Sub system. Must be >= 0. For example, if the value is 10, + the new ack deadline will expire 10 seconds after this request + is received. If the value is 0, the message is immediately + made available for another streaming or non-streaming pull + request. If the value is < 0 (an error), the stream will be + aborted with status ``INVALID_ARGUMENT``. + modify_deadline_ack_ids: + List of acknowledgement IDs whose deadline will be modified + based on the corresponding element in + ``modify_deadline_seconds``. This field can be used to + indicate that more time is needed to process a message by the + subscriber, or to make the message available for redelivery if + the processing was interrupted. + stream_ack_deadline_seconds: + The ack deadline to use for the stream. This must be provided + in the first request on the stream, but it can also be updated + on subsequent requests from client to server. The minimum + deadline you can specify is 10 seconds. The maximum deadline + you can specify is 600 seconds (10 minutes). 
+ """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.StreamingPullRequest) )) _sym_db.RegisterMessage(StreamingPullRequest) @@ -1611,6 +2278,15 @@ StreamingPullResponse = _reflection.GeneratedProtocolMessageType('StreamingPullResponse', (_message.Message,), dict( DESCRIPTOR = _STREAMINGPULLRESPONSE, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Response for the ``StreamingPull`` method. This response is used to + stream messages from the server to the client. + + + Attributes: + received_messages: + Received Pub/Sub messages. This will not be empty. + """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.StreamingPullResponse) )) _sym_db.RegisterMessage(StreamingPullResponse) @@ -1618,20 +2294,109 @@ CreateSnapshotRequest = _reflection.GeneratedProtocolMessageType('CreateSnapshotRequest', (_message.Message,), dict( DESCRIPTOR = _CREATESNAPSHOTREQUEST, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Request for the ``CreateSnapshot`` method. + + + Attributes: + name: + Optional user-provided name for this snapshot. If the name is + not provided in the request, the server will assign a random + name for this snapshot on the same project as the + subscription. Note that for REST API requests, you must + specify a name. Format is + ``projects/{project}/snapshots/{snap}``. + subscription: + The subscription whose backlog the snapshot retains. + Specifically, the created snapshot is guaranteed to retain: + (a) The existing backlog on the subscription. More precisely, + this is defined as the messages in the subscription's backlog + that are unacknowledged upon the successful completion of the + ``CreateSnapshot`` request; as well as: (b) Any messages + published to the subscription's topic following the successful + completion of the CreateSnapshot request. Format is + ``projects/{project}/subscriptions/{sub}``. 
+ """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.CreateSnapshotRequest) )) _sym_db.RegisterMessage(CreateSnapshotRequest) +UpdateSnapshotRequest = _reflection.GeneratedProtocolMessageType('UpdateSnapshotRequest', (_message.Message,), dict( + DESCRIPTOR = _UPDATESNAPSHOTREQUEST, + __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Request for the UpdateSnapshot method. + + + Attributes: + snapshot: + The updated snpashot object. + update_mask: + Indicates which fields in the provided snapshot to update. + Must be specified and non-empty. + """, + # @@protoc_insertion_point(class_scope:google.pubsub.v1.UpdateSnapshotRequest) + )) +_sym_db.RegisterMessage(UpdateSnapshotRequest) + Snapshot = _reflection.GeneratedProtocolMessageType('Snapshot', (_message.Message,), dict( + + LabelsEntry = _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), dict( + DESCRIPTOR = _SNAPSHOT_LABELSENTRY, + __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + # @@protoc_insertion_point(class_scope:google.pubsub.v1.Snapshot.LabelsEntry) + )) + , DESCRIPTOR = _SNAPSHOT, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """A snapshot resource. + + + Attributes: + name: + The name of the snapshot. + topic: + The name of the topic from which this snapshot is retaining + messages. + expire_time: + The snapshot is guaranteed to exist up until this time. A + newly-created snapshot expires no later than 7 days from the + time of its creation. Its exact lifetime is determined at + creation by the existing backlog in the source subscription. + Specifically, the lifetime of the snapshot is ``7 days - (age + of oldest unacked message in the subscription)``. For example, + consider a subscription whose oldest unacked message is 3 days + old. If a snapshot is created from this subscription, the + snapshot -- which will always capture this 3-day-old backlog + as long as the snapshot exists -- will expire in 4 days. 
+ labels: + User labels. + """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.Snapshot) )) _sym_db.RegisterMessage(Snapshot) +_sym_db.RegisterMessage(Snapshot.LabelsEntry) ListSnapshotsRequest = _reflection.GeneratedProtocolMessageType('ListSnapshotsRequest', (_message.Message,), dict( DESCRIPTOR = _LISTSNAPSHOTSREQUEST, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Request for the ``ListSnapshots`` method. + + + Attributes: + project: + The name of the cloud project that snapshots belong to. Format + is ``projects/{project}``. + page_size: + Maximum number of snapshots to return. + page_token: + The value returned by the last ``ListSnapshotsResponse``; + indicates that this is a continuation of a prior + ``ListSnapshots`` call, and that the system should return the + next page of data. + """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.ListSnapshotsRequest) )) _sym_db.RegisterMessage(ListSnapshotsRequest) @@ -1639,6 +2404,18 @@ ListSnapshotsResponse = _reflection.GeneratedProtocolMessageType('ListSnapshotsResponse', (_message.Message,), dict( DESCRIPTOR = _LISTSNAPSHOTSRESPONSE, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Response for the ``ListSnapshots`` method. + + + Attributes: + snapshots: + The resulting snapshots. + next_page_token: + If not empty, indicates that there may be more snapshot that + match the request; this value should be passed in a new + ``ListSnapshotsRequest``. + """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.ListSnapshotsResponse) )) _sym_db.RegisterMessage(ListSnapshotsResponse) @@ -1646,6 +2423,15 @@ DeleteSnapshotRequest = _reflection.GeneratedProtocolMessageType('DeleteSnapshotRequest', (_message.Message,), dict( DESCRIPTOR = _DELETESNAPSHOTREQUEST, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Request for the ``DeleteSnapshot`` method. + + + Attributes: + snapshot: + The name of the snapshot to delete. 
Format is + ``projects/{project}/snapshots/{snap}``. + """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.DeleteSnapshotRequest) )) _sym_db.RegisterMessage(DeleteSnapshotRequest) @@ -1653,6 +2439,31 @@ SeekRequest = _reflection.GeneratedProtocolMessageType('SeekRequest', (_message.Message,), dict( DESCRIPTOR = _SEEKREQUEST, __module__ = 'google.cloud.proto.pubsub.v1.pubsub_pb2' + , + __doc__ = """Request for the ``Seek`` method. + + + Attributes: + subscription: + The subscription to affect. + time: + The time to seek to. Messages retained in the subscription + that were published before this time are marked as + acknowledged, and messages retained in the subscription that + were published after this time are marked as unacknowledged. + Note that this operation affects only those messages retained + in the subscription (configured by the combination of + ``message_retention_duration`` and ``retain_acked_messages``). + For example, if ``time`` corresponds to a point before the + message retention window (or to a point before the system's + notion of the subscription creation time), only retained + messages will be marked as unacknowledged, and already- + expunged messages will not be restored. + snapshot: + The snapshot to seek to. The snapshot's topic must be the same + as that of the provided subscription. Format is + ``projects/{project}/snapshots/{snap}``. 
+ """, # @@protoc_insertion_point(class_scope:google.pubsub.v1.SeekRequest) )) _sym_db.RegisterMessage(SeekRequest) @@ -1667,10 +2478,16 @@ DESCRIPTOR.has_options = True DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\024com.google.pubsub.v1B\013PubsubProtoP\001Z6google.golang.org/genproto/googleapis/pubsub/v1;pubsub\370\001\001\252\002\026Google.Cloud.PubSub.V1')) +_TOPIC_LABELSENTRY.has_options = True +_TOPIC_LABELSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _PUBSUBMESSAGE_ATTRIBUTESENTRY.has_options = True _PUBSUBMESSAGE_ATTRIBUTESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +_SUBSCRIPTION_LABELSENTRY.has_options = True +_SUBSCRIPTION_LABELSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _PUSHCONFIG_ATTRIBUTESENTRY.has_options = True _PUSHCONFIG_ATTRIBUTESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +_SNAPSHOT_LABELSENTRY.has_options = True +_SNAPSHOT_LABELSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) try: # THESE ELEMENTS WILL BE DEPRECATED. # Please use the generated *_pb2_grpc.py files instead. @@ -1752,6 +2569,11 @@ def __init__(self, channel): request_serializer=CreateSnapshotRequest.SerializeToString, response_deserializer=Snapshot.FromString, ) + self.UpdateSnapshot = channel.unary_unary( + '/google.pubsub.v1.Subscriber/UpdateSnapshot', + request_serializer=UpdateSnapshotRequest.SerializeToString, + response_deserializer=Snapshot.FromString, + ) self.DeleteSnapshot = channel.unary_unary( '/google.pubsub.v1.Subscriber/DeleteSnapshot', request_serializer=DeleteSnapshotRequest.SerializeToString, @@ -1795,6 +2617,10 @@ def GetSubscription(self, request, context): def UpdateSubscription(self, request, context): """Updates an existing subscription. 
Note that certain properties of a subscription, such as its topic, are not modifiable. + NOTE: The style guide requires body: "subscription" instead of body: "*". + Keeping the latter for internal consistency in V1, however it should be + corrected in V2. See + https://cloud.google.com/apis/design/standard_methods#update for details. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') @@ -1905,6 +2731,18 @@ def CreateSnapshot(self, request, context): context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') + def UpdateSnapshot(self, request, context): + """Updates an existing snapshot. Note that certain properties of a snapshot + are not modifiable. + NOTE: The style guide requires body: "snapshot" instead of body: "*". + Keeping the latter for internal consistency in V1, however it should be + corrected in V2. See + https://cloud.google.com/apis/design/standard_methods#update for details. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + def DeleteSnapshot(self, request, context): """Removes an existing snapshot. All messages retained in the snapshot are immediately dropped. 
After a snapshot is deleted, a new one may be @@ -1986,6 +2824,11 @@ def add_SubscriberServicer_to_server(servicer, server): request_deserializer=CreateSnapshotRequest.FromString, response_serializer=Snapshot.SerializeToString, ), + 'UpdateSnapshot': grpc.unary_unary_rpc_method_handler( + servicer.UpdateSnapshot, + request_deserializer=UpdateSnapshotRequest.FromString, + response_serializer=Snapshot.SerializeToString, + ), 'DeleteSnapshot': grpc.unary_unary_rpc_method_handler( servicer.DeleteSnapshot, request_deserializer=DeleteSnapshotRequest.FromString, @@ -2018,6 +2861,11 @@ def __init__(self, channel): request_serializer=Topic.SerializeToString, response_deserializer=Topic.FromString, ) + self.UpdateTopic = channel.unary_unary( + '/google.pubsub.v1.Publisher/UpdateTopic', + request_serializer=UpdateTopicRequest.SerializeToString, + response_deserializer=Topic.FromString, + ) self.Publish = channel.unary_unary( '/google.pubsub.v1.Publisher/Publish', request_serializer=PublishRequest.SerializeToString, @@ -2057,6 +2905,18 @@ def CreateTopic(self, request, context): context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') + def UpdateTopic(self, request, context): + """Updates an existing topic. Note that certain properties of a topic are not + modifiable. Options settings follow the style guide: + NOTE: The style guide requires body: "topic" instead of body: "*". + Keeping the latter for internal consistency in V1, however it should be + corrected in V2. See + https://cloud.google.com/apis/design/standard_methods#update for details. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + def Publish(self, request, context): """Adds one or more messages to the topic. Returns `NOT_FOUND` if the topic does not exist. 
The message payload must not be empty; it must contain @@ -2106,6 +2966,11 @@ def add_PublisherServicer_to_server(servicer, server): request_deserializer=Topic.FromString, response_serializer=Topic.SerializeToString, ), + 'UpdateTopic': grpc.unary_unary_rpc_method_handler( + servicer.UpdateTopic, + request_deserializer=UpdateTopicRequest.FromString, + response_serializer=Topic.SerializeToString, + ), 'Publish': grpc.unary_unary_rpc_method_handler( servicer.Publish, request_deserializer=PublishRequest.FromString, @@ -2166,6 +3031,10 @@ def GetSubscription(self, request, context): def UpdateSubscription(self, request, context): """Updates an existing subscription. Note that certain properties of a subscription, such as its topic, are not modifiable. + NOTE: The style guide requires body: "subscription" instead of body: "*". + Keeping the latter for internal consistency in V1, however it should be + corrected in V2. See + https://cloud.google.com/apis/design/standard_methods#update for details. """ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) def ListSubscriptions(self, request, context): @@ -2246,6 +3115,15 @@ def CreateSnapshot(self, request, context): Note that for REST API requests, you must specify a name in the request. """ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def UpdateSnapshot(self, request, context): + """Updates an existing snapshot. Note that certain properties of a snapshot + are not modifiable. + NOTE: The style guide requires body: "snapshot" instead of body: "*". + Keeping the latter for internal consistency in V1, however it should be + corrected in V2. See + https://cloud.google.com/apis/design/standard_methods#update for details. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) def DeleteSnapshot(self, request, context): """Removes an existing snapshot. All messages retained in the snapshot are immediately dropped. 
After a snapshot is deleted, a new one may be @@ -2291,6 +3169,10 @@ def GetSubscription(self, request, timeout, metadata=None, with_call=False, prot def UpdateSubscription(self, request, timeout, metadata=None, with_call=False, protocol_options=None): """Updates an existing subscription. Note that certain properties of a subscription, such as its topic, are not modifiable. + NOTE: The style guide requires body: "subscription" instead of body: "*". + Keeping the latter for internal consistency in V1, however it should be + corrected in V2. See + https://cloud.google.com/apis/design/standard_methods#update for details. """ raise NotImplementedError() UpdateSubscription.future = None @@ -2380,6 +3262,16 @@ def CreateSnapshot(self, request, timeout, metadata=None, with_call=False, proto """ raise NotImplementedError() CreateSnapshot.future = None + def UpdateSnapshot(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Updates an existing snapshot. Note that certain properties of a snapshot + are not modifiable. + NOTE: The style guide requires body: "snapshot" instead of body: "*". + Keeping the latter for internal consistency in V1, however it should be + corrected in V2. See + https://cloud.google.com/apis/design/standard_methods#update for details. + """ + raise NotImplementedError() + UpdateSnapshot.future = None def DeleteSnapshot(self, request, timeout, metadata=None, with_call=False, protocol_options=None): """Removes an existing snapshot. All messages retained in the snapshot are immediately dropped. 
After a snapshot is deleted, a new one may be @@ -2416,6 +3308,7 @@ def beta_create_Subscriber_server(servicer, pool=None, pool_size=None, default_t ('google.pubsub.v1.Subscriber', 'Pull'): PullRequest.FromString, ('google.pubsub.v1.Subscriber', 'Seek'): SeekRequest.FromString, ('google.pubsub.v1.Subscriber', 'StreamingPull'): StreamingPullRequest.FromString, + ('google.pubsub.v1.Subscriber', 'UpdateSnapshot'): UpdateSnapshotRequest.FromString, ('google.pubsub.v1.Subscriber', 'UpdateSubscription'): UpdateSubscriptionRequest.FromString, } response_serializers = { @@ -2432,6 +3325,7 @@ def beta_create_Subscriber_server(servicer, pool=None, pool_size=None, default_t ('google.pubsub.v1.Subscriber', 'Pull'): PullResponse.SerializeToString, ('google.pubsub.v1.Subscriber', 'Seek'): SeekResponse.SerializeToString, ('google.pubsub.v1.Subscriber', 'StreamingPull'): StreamingPullResponse.SerializeToString, + ('google.pubsub.v1.Subscriber', 'UpdateSnapshot'): Snapshot.SerializeToString, ('google.pubsub.v1.Subscriber', 'UpdateSubscription'): Subscription.SerializeToString, } method_implementations = { @@ -2448,6 +3342,7 @@ def beta_create_Subscriber_server(servicer, pool=None, pool_size=None, default_t ('google.pubsub.v1.Subscriber', 'Pull'): face_utilities.unary_unary_inline(servicer.Pull), ('google.pubsub.v1.Subscriber', 'Seek'): face_utilities.unary_unary_inline(servicer.Seek), ('google.pubsub.v1.Subscriber', 'StreamingPull'): face_utilities.stream_stream_inline(servicer.StreamingPull), + ('google.pubsub.v1.Subscriber', 'UpdateSnapshot'): face_utilities.unary_unary_inline(servicer.UpdateSnapshot), ('google.pubsub.v1.Subscriber', 'UpdateSubscription'): face_utilities.unary_unary_inline(servicer.UpdateSubscription), } server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) @@ -2474,6 
+3369,7 @@ def beta_create_Subscriber_stub(channel, host=None, metadata_transformer=None, p ('google.pubsub.v1.Subscriber', 'Pull'): PullRequest.SerializeToString, ('google.pubsub.v1.Subscriber', 'Seek'): SeekRequest.SerializeToString, ('google.pubsub.v1.Subscriber', 'StreamingPull'): StreamingPullRequest.SerializeToString, + ('google.pubsub.v1.Subscriber', 'UpdateSnapshot'): UpdateSnapshotRequest.SerializeToString, ('google.pubsub.v1.Subscriber', 'UpdateSubscription'): UpdateSubscriptionRequest.SerializeToString, } response_deserializers = { @@ -2490,6 +3386,7 @@ def beta_create_Subscriber_stub(channel, host=None, metadata_transformer=None, p ('google.pubsub.v1.Subscriber', 'Pull'): PullResponse.FromString, ('google.pubsub.v1.Subscriber', 'Seek'): SeekResponse.FromString, ('google.pubsub.v1.Subscriber', 'StreamingPull'): StreamingPullResponse.FromString, + ('google.pubsub.v1.Subscriber', 'UpdateSnapshot'): Snapshot.FromString, ('google.pubsub.v1.Subscriber', 'UpdateSubscription'): Subscription.FromString, } cardinalities = { @@ -2506,6 +3403,7 @@ def beta_create_Subscriber_stub(channel, host=None, metadata_transformer=None, p 'Pull': cardinality.Cardinality.UNARY_UNARY, 'Seek': cardinality.Cardinality.UNARY_UNARY, 'StreamingPull': cardinality.Cardinality.STREAM_STREAM, + 'UpdateSnapshot': cardinality.Cardinality.UNARY_UNARY, 'UpdateSubscription': cardinality.Cardinality.UNARY_UNARY, } stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) @@ -2525,6 +3423,15 @@ def CreateTopic(self, request, context): """Creates the given topic with the given name. """ context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def UpdateTopic(self, request, context): + """Updates an existing topic. Note that certain properties of a topic are not + modifiable. 
Options settings follow the style guide: + NOTE: The style guide requires body: "topic" instead of body: "*". + Keeping the latter for internal consistency in V1, however it should be + corrected in V2. See + https://cloud.google.com/apis/design/standard_methods#update for details. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) def Publish(self, request, context): """Adds one or more messages to the topic. Returns `NOT_FOUND` if the topic does not exist. The message payload must not be empty; it must contain @@ -2567,6 +3474,16 @@ def CreateTopic(self, request, timeout, metadata=None, with_call=False, protocol """ raise NotImplementedError() CreateTopic.future = None + def UpdateTopic(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Updates an existing topic. Note that certain properties of a topic are not + modifiable. Options settings follow the style guide: + NOTE: The style guide requires body: "topic" instead of body: "*". + Keeping the latter for internal consistency in V1, however it should be + corrected in V2. See + https://cloud.google.com/apis/design/standard_methods#update for details. + """ + raise NotImplementedError() + UpdateTopic.future = None def Publish(self, request, timeout, metadata=None, with_call=False, protocol_options=None): """Adds one or more messages to the topic. Returns `NOT_FOUND` if the topic does not exist. 
The message payload must not be empty; it must contain @@ -2613,6 +3530,7 @@ def beta_create_Publisher_server(servicer, pool=None, pool_size=None, default_ti ('google.pubsub.v1.Publisher', 'ListTopicSubscriptions'): ListTopicSubscriptionsRequest.FromString, ('google.pubsub.v1.Publisher', 'ListTopics'): ListTopicsRequest.FromString, ('google.pubsub.v1.Publisher', 'Publish'): PublishRequest.FromString, + ('google.pubsub.v1.Publisher', 'UpdateTopic'): UpdateTopicRequest.FromString, } response_serializers = { ('google.pubsub.v1.Publisher', 'CreateTopic'): Topic.SerializeToString, @@ -2621,6 +3539,7 @@ def beta_create_Publisher_server(servicer, pool=None, pool_size=None, default_ti ('google.pubsub.v1.Publisher', 'ListTopicSubscriptions'): ListTopicSubscriptionsResponse.SerializeToString, ('google.pubsub.v1.Publisher', 'ListTopics'): ListTopicsResponse.SerializeToString, ('google.pubsub.v1.Publisher', 'Publish'): PublishResponse.SerializeToString, + ('google.pubsub.v1.Publisher', 'UpdateTopic'): Topic.SerializeToString, } method_implementations = { ('google.pubsub.v1.Publisher', 'CreateTopic'): face_utilities.unary_unary_inline(servicer.CreateTopic), @@ -2629,6 +3548,7 @@ def beta_create_Publisher_server(servicer, pool=None, pool_size=None, default_ti ('google.pubsub.v1.Publisher', 'ListTopicSubscriptions'): face_utilities.unary_unary_inline(servicer.ListTopicSubscriptions), ('google.pubsub.v1.Publisher', 'ListTopics'): face_utilities.unary_unary_inline(servicer.ListTopics), ('google.pubsub.v1.Publisher', 'Publish'): face_utilities.unary_unary_inline(servicer.Publish), + ('google.pubsub.v1.Publisher', 'UpdateTopic'): face_utilities.unary_unary_inline(servicer.UpdateTopic), } server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) return 
beta_implementations.server(method_implementations, options=server_options) @@ -2647,6 +3567,7 @@ def beta_create_Publisher_stub(channel, host=None, metadata_transformer=None, po ('google.pubsub.v1.Publisher', 'ListTopicSubscriptions'): ListTopicSubscriptionsRequest.SerializeToString, ('google.pubsub.v1.Publisher', 'ListTopics'): ListTopicsRequest.SerializeToString, ('google.pubsub.v1.Publisher', 'Publish'): PublishRequest.SerializeToString, + ('google.pubsub.v1.Publisher', 'UpdateTopic'): UpdateTopicRequest.SerializeToString, } response_deserializers = { ('google.pubsub.v1.Publisher', 'CreateTopic'): Topic.FromString, @@ -2655,6 +3576,7 @@ def beta_create_Publisher_stub(channel, host=None, metadata_transformer=None, po ('google.pubsub.v1.Publisher', 'ListTopicSubscriptions'): ListTopicSubscriptionsResponse.FromString, ('google.pubsub.v1.Publisher', 'ListTopics'): ListTopicsResponse.FromString, ('google.pubsub.v1.Publisher', 'Publish'): PublishResponse.FromString, + ('google.pubsub.v1.Publisher', 'UpdateTopic'): Topic.FromString, } cardinalities = { 'CreateTopic': cardinality.Cardinality.UNARY_UNARY, @@ -2663,6 +3585,7 @@ def beta_create_Publisher_stub(channel, host=None, metadata_transformer=None, po 'ListTopicSubscriptions': cardinality.Cardinality.UNARY_UNARY, 'ListTopics': cardinality.Cardinality.UNARY_UNARY, 'Publish': cardinality.Cardinality.UNARY_UNARY, + 'UpdateTopic': cardinality.Cardinality.UNARY_UNARY, } stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) return beta_implementations.dynamic_stub(channel, 'google.pubsub.v1.Publisher', cardinalities, options=stub_options) diff --git a/pubsub/google/cloud/proto/pubsub/v1/pubsub_pb2_grpc.py b/pubsub/google/cloud/proto/pubsub/v1/pubsub_pb2_grpc.py index 5a970cbc77ab..06dd470470d8 100644 --- 
a/pubsub/google/cloud/proto/pubsub/v1/pubsub_pb2_grpc.py +++ b/pubsub/google/cloud/proto/pubsub/v1/pubsub_pb2_grpc.py @@ -76,6 +76,11 @@ def __init__(self, channel): request_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.CreateSnapshotRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.Snapshot.FromString, ) + self.UpdateSnapshot = channel.unary_unary( + '/google.pubsub.v1.Subscriber/UpdateSnapshot', + request_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.UpdateSnapshotRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.Snapshot.FromString, + ) self.DeleteSnapshot = channel.unary_unary( '/google.pubsub.v1.Subscriber/DeleteSnapshot', request_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.DeleteSnapshotRequest.SerializeToString, @@ -119,6 +124,10 @@ def GetSubscription(self, request, context): def UpdateSubscription(self, request, context): """Updates an existing subscription. Note that certain properties of a subscription, such as its topic, are not modifiable. + NOTE: The style guide requires body: "subscription" instead of body: "*". + Keeping the latter for internal consistency in V1, however it should be + corrected in V2. See + https://cloud.google.com/apis/design/standard_methods#update for details. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') @@ -229,6 +238,18 @@ def CreateSnapshot(self, request, context): context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') + def UpdateSnapshot(self, request, context): + """Updates an existing snapshot. Note that certain properties of a snapshot + are not modifiable. + NOTE: The style guide requires body: "snapshot" instead of body: "*". + Keeping the latter for internal consistency in V1, however it should be + corrected in V2. 
See + https://cloud.google.com/apis/design/standard_methods#update for details. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + def DeleteSnapshot(self, request, context): """Removes an existing snapshot. All messages retained in the snapshot are immediately dropped. After a snapshot is deleted, a new one may be @@ -310,6 +331,11 @@ def add_SubscriberServicer_to_server(servicer, server): request_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.CreateSnapshotRequest.FromString, response_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.Snapshot.SerializeToString, ), + 'UpdateSnapshot': grpc.unary_unary_rpc_method_handler( + servicer.UpdateSnapshot, + request_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.UpdateSnapshotRequest.FromString, + response_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.Snapshot.SerializeToString, + ), 'DeleteSnapshot': grpc.unary_unary_rpc_method_handler( servicer.DeleteSnapshot, request_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.DeleteSnapshotRequest.FromString, @@ -342,6 +368,11 @@ def __init__(self, channel): request_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.Topic.SerializeToString, response_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.Topic.FromString, ) + self.UpdateTopic = channel.unary_unary( + '/google.pubsub.v1.Publisher/UpdateTopic', + request_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.UpdateTopicRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.Topic.FromString, + ) self.Publish = channel.unary_unary( '/google.pubsub.v1.Publisher/Publish', 
request_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.PublishRequest.SerializeToString, @@ -381,6 +412,18 @@ def CreateTopic(self, request, context): context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') + def UpdateTopic(self, request, context): + """Updates an existing topic. Note that certain properties of a topic are not + modifiable. Options settings follow the style guide: + NOTE: The style guide requires body: "topic" instead of body: "*". + Keeping the latter for internal consistency in V1, however it should be + corrected in V2. See + https://cloud.google.com/apis/design/standard_methods#update for details. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + def Publish(self, request, context): """Adds one or more messages to the topic. Returns `NOT_FOUND` if the topic does not exist. The message payload must not be empty; it must contain @@ -430,6 +473,11 @@ def add_PublisherServicer_to_server(servicer, server): request_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.Topic.FromString, response_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.Topic.SerializeToString, ), + 'UpdateTopic': grpc.unary_unary_rpc_method_handler( + servicer.UpdateTopic, + request_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.UpdateTopicRequest.FromString, + response_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.Topic.SerializeToString, + ), 'Publish': grpc.unary_unary_rpc_method_handler( servicer.Publish, request_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.PublishRequest.FromString, diff --git a/pubsub/google/cloud/pubsub_v1/__init__.py b/pubsub/google/cloud/pubsub_v1/__init__.py index 7e785dc9dc7a..21706f6eee5e 100644 --- a/pubsub/google/cloud/pubsub_v1/__init__.py +++ 
b/pubsub/google/cloud/pubsub_v1/__init__.py @@ -15,8 +15,8 @@ from __future__ import absolute_import from google.cloud.pubsub_v1 import types -from google.cloud.pubsub_v1.publisher import PublisherClient -from google.cloud.pubsub_v1.subscriber import SubscriberClient +from google.cloud.pubsub_v1.publisher import Client as PublisherClient +from google.cloud.pubsub_v1.subscriber import Client as SubscriberClient __all__ = ( 'PublisherClient', diff --git a/pubsub/google/cloud/pubsub_v1/publisher/batch/base.py b/pubsub/google/cloud/pubsub_v1/publisher/batch/base.py index 68dc9c2850ec..61eea2bb9ad5 100644 --- a/pubsub/google/cloud/pubsub_v1/publisher/batch/base.py +++ b/pubsub/google/cloud/pubsub_v1/publisher/batch/base.py @@ -15,6 +15,7 @@ from __future__ import absolute_import import abc +import enum import six @@ -104,7 +105,7 @@ def will_accept(self, message): bool: Whether this batch can accept the message. """ # If this batch is not accepting messages generally, return False. - if self.status != self.Status.ACCEPTING_MESSAGES: + if self.status != BatchStatus.ACCEPTING_MESSAGES: return False # If this batch can not hold the message in question, return False. @@ -133,13 +134,14 @@ def publish(self, message): """ raise NotImplementedError - class Status(object): - """An enum class representing valid statuses for a batch. - It is acceptable for a class to use a status that is not on this - class; this represents the list of statuses where the existing - library hooks in functionality. - """ - ACCEPTING_MESSAGES = 'accepting messages' - ERROR = 'error' - SUCCESS = 'success' +class BatchStatus(object): + """An enum-like class representing valid statuses for a batch. + + It is acceptable for a class to use a status that is not on this + class; this represents the list of statuses where the existing + library hooks in functionality. 
+ """ + ACCEPTING_MESSAGES = 'accepting messages' + ERROR = 'error' + SUCCESS = 'success' diff --git a/pubsub/google/cloud/pubsub_v1/publisher/batch/thread.py b/pubsub/google/cloud/pubsub_v1/publisher/batch/thread.py index 61aea29f8394..f5c08a76f315 100644 --- a/pubsub/google/cloud/pubsub_v1/publisher/batch/thread.py +++ b/pubsub/google/cloud/pubsub_v1/publisher/batch/thread.py @@ -14,6 +14,7 @@ from __future__ import absolute_import +import logging import threading import time @@ -66,7 +67,7 @@ def __init__(self, client, topic, settings, autocommit=True): self._messages = [] self._size = 0 self._settings = settings - self._status = self.Status.ACCEPTING_MESSAGES + self._status = base.BatchStatus.ACCEPTING_MESSAGES self._topic = topic # If max latency is specified, start a thread to monitor the batch and @@ -154,7 +155,7 @@ def _commit(self): with self._commit_lock: # If, in the intervening period, the batch started to be committed, # or completed a commit, then no-op at this point. - if self._status != self.Status.ACCEPTING_MESSAGES: + if self._status != base.BatchStatus.ACCEPTING_MESSAGES: return # Update the status. @@ -165,10 +166,16 @@ def _commit(self): return # Begin the request to publish these messages. + # Log how long the underlying request takes. + start = time.time() response = self.client.api.publish( self._topic, self.messages, ) + end = time.time() + logging.getLogger().debug('gRPC Publish took {s} seconds.'.format( + s=end - start, + )) # We got a response from Pub/Sub; denote that we are processing. self._status = 'processing results' @@ -185,7 +192,7 @@ def _commit(self): # Iterate over the futures on the queue and return the response # IDs. We are trusting that there is a 1:1 mapping, and raise an # exception if not. 
- self._status = self.Status.SUCCESS + self._status = base.BatchStatus.SUCCESS for message_id, future in zip(response.message_ids, self._futures): future.set_result(message_id) diff --git a/pubsub/google/cloud/pubsub_v1/publisher/client.py b/pubsub/google/cloud/pubsub_v1/publisher/client.py index 1a9903a31748..e80662a715ef 100644 --- a/pubsub/google/cloud/pubsub_v1/publisher/client.py +++ b/pubsub/google/cloud/pubsub_v1/publisher/client.py @@ -39,8 +39,8 @@ class Client(object): get sensible defaults. Args: - batch_settings (~.pubsub_v1.types.BatchSettings): The settings - for batch publishing. + batch_settings (~google.cloud.pubsub_v1.types.BatchSettings): The + settings for batch publishing. batch_class (class): A class that describes how to handle batches. You may subclass the :class:`.pubsub_v1.publisher.batch.base.BaseBatch` class in @@ -73,8 +73,8 @@ def batch(self, topic, message, create=True, autocommit=True): Args: topic (str): A string representing the topic. - message (~.pubsub_v1.types.PubsubMessage): The message that will - be committed. + message (~google.cloud.pubsub_v1.types.PubsubMessage): The message + that will be committed. create (bool): Whether to create a new batch if no batch is found. Defaults to True. autocommit (bool): Whether to autocommit this batch. @@ -128,16 +128,15 @@ def publish(self, topic, data, **attrs): >>> response = client.publish(topic, data, username='guido') Args: - topic (~.pubsub_v1.types.Topic): The topic to publish - messages to. + topic (str): The topic to publish messages to. data (bytes): A bytestring representing the message body. This must be a bytestring. attrs (Mapping[str, str]): A dictionary of attributes to be sent as metadata. (These may be text strings or byte strings.) Returns: - ~.pubsub_v1.publisher.futures.Future: An object conforming - to the ``concurrent.futures.Future`` interface. + ~concurrent.futures.Future: An object conforming to the + ``concurrent.futures.Future`` interface. 
""" # Sanity check: Is the data being sent as a bytestring? # If it is literally anything else, complain loudly about it. diff --git a/pubsub/google/cloud/pubsub_v1/publisher/exceptions.py b/pubsub/google/cloud/pubsub_v1/publisher/exceptions.py index 5b5d63d51494..bae090ceb9d7 100644 --- a/pubsub/google/cloud/pubsub_v1/publisher/exceptions.py +++ b/pubsub/google/cloud/pubsub_v1/publisher/exceptions.py @@ -14,14 +14,9 @@ from __future__ import absolute_import -from google.api.core.exceptions import GoogleAPICallError - +from concurrent.futures import TimeoutError -try: - from concurrent.futures import TimeoutError -except ImportError: - class TimeoutError(Exception): - pass +from google.api.core.exceptions import GoogleAPICallError class PublishError(GoogleAPICallError): diff --git a/pubsub/google/cloud/pubsub_v1/publisher/futures.py b/pubsub/google/cloud/pubsub_v1/publisher/futures.py index dff4aa234947..cbc67d9e55c3 100644 --- a/pubsub/google/cloud/pubsub_v1/publisher/futures.py +++ b/pubsub/google/cloud/pubsub_v1/publisher/futures.py @@ -106,10 +106,6 @@ def exception(self, timeout=None, _wait=1): Returns: Exception: The exception raised by the call, if any. """ - # If no timeout was specified, use inf. - if timeout is None: - timeout = float('inf') - # Wait until the future is done. if not self._completed.wait(timeout=timeout): raise exceptions.TimeoutError('Timed out waiting for result.') diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/__init__.py b/pubsub/google/cloud/pubsub_v1/subscriber/__init__.py new file mode 100644 index 000000000000..d98a7bb75be4 --- /dev/null +++ b/pubsub/google/cloud/pubsub_v1/subscriber/__init__.py @@ -0,0 +1,22 @@ +# Copyright 2017, Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import + +from google.cloud.pubsub_v1.subscriber.client import Client + + +__all__ = ( + 'Client', +) diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/_consumer.py b/pubsub/google/cloud/pubsub_v1/subscriber/_consumer.py new file mode 100644 index 000000000000..9fb2567176bc --- /dev/null +++ b/pubsub/google/cloud/pubsub_v1/subscriber/_consumer.py @@ -0,0 +1,267 @@ +# Copyright 2017, Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Bidirectional Streaming Consumer. + +The goal here is to consume a bidirectional streaming RPC by fanning out the +responses received from the server to be processed and fanning in requests from +the response processors to be sent to the server through the request stream. +This module is a framework to deal with this pattern in a consistent way: + + * A :class:`Consumer` manages scheduling requests to a stream and consuming + responses from a stream. 
The Consumer takes the responses and schedules + them to be processed in callbacks using any + :class:`~concurrent.futures.Executor`. + * A :class:`Policy` which determines how the consumer calls the RPC and + processes responses, errors, and messages. + +The :class:`Policy` is the only class that's intended to be sub-classed here. +This would be implemented for every bidirectional streaming method. +How does this work? The first part of the implementation, fanning out +responses, its actually quite straightforward and can be done with just a +:class:`concurrent.futures.Executor`: + +.. graphviz:: + digraph responses_only { + "gRPC C Core" -> "gRPC Python" [label="queue", dir="both"] + "gRPC Python" -> "Consumer" [label="responses", color="red"] + "Consumer" -> "Policy" [label="responses", color="red"] + "Policy" -> "futures.Executor" [label="response", color="red"] + "futures.Executor" -> "callback" [label="response", color="red"] + } + +The challenge comes from the fact that in bidirectional streaming two more +things have to be done: + + 1. The consumer must maintain a long-running request generator. + 2. The consumer must provide some way for the response processor to queue + new requests. + +These are especially important because in the case of Pub/Sub you are +essentially streaming requests indefinitely and receiving responses +indefinitely. + +For the first challenge, we take advantage of the fact that gRPC runs the +request generator in its own thread. That thread can block, so we can use +a queue for that: + +.. graphviz:: + digraph response_flow { + "gRPC C Core" -> "gRPC Python" [label="queue", dir="both"] + "gRPC Python" -> "Consumer" [label="responses", color="red"] + "Consumer" -> "request generator thread" [label="starts", color="gray"] + "request generator thread" -> "gRPC Python" + [label="requests", color="blue"] + } + +The final piece of the puzzle, allowing things from anywhere to queue new +requests, it a bit more complex. 
If we were only dealing with threads, then the +response workers could just directly interact with the policy/consumer to +queue new requests: + +.. graphviz:: + digraph thread_only_requests { + "gRPC C Core" -> "gRPC Python" [label="queue", dir="both"] + "gRPC Python" -> "Consumer" [label="responses", color="red"] + "Consumer" -> "request generator thread" [label="starts", color="gray"] + "request generator thread" -> "gRPC Python" + [label="requests", color="blue"] + "Consumer" -> "Policy" [label="responses", color="red"] + "Policy" -> "futures.Executor" [label="response", color="red"] + "futures.Executor" -> "callback" [label="response", color="red"] + "callback" -> "Consumer" [label="send_request", color="blue"] + } + +But, because this does not dictate any particular concurrent strategy for +dealing with the responses, it's possible that a response could be processed +in a different thread, process, or even on a different machine. Because of +this, we need an intermediary queue between the callbacks and the gRPC request +queue to bridge the "concurrency gap". To pump items from the concurrency-safe +queue into the gRPC request queue, we need another worker thread. Putting this +all together looks like this: + +.. 
graphviz:: + digraph responses_only { + "gRPC C Core" -> "gRPC Python" [label="queue", dir="both"] + "gRPC Python" -> "Consumer" [label="responses", color="red"] + "Consumer" -> "request generator thread" [label="starts", color="gray"] + "Policy" -> "QueueCallbackThread" [label="starts", color="gray"] + "request generator thread" -> "gRPC Python" + [label="requests", color="blue"] + "Consumer" -> "Policy" [label="responses", color="red"] + "Policy" -> "futures.Executor" [label="response", color="red"] + "futures.Executor" -> "callback" [label="response", color="red"] + "callback" -> "callback_request_queue" [label="requests", color="blue"] + "callback_request_queue" -> "QueueCallbackThread" + [label="consumed by", color="blue"] + "QueueCallbackThread" -> "Consumer" + [label="send_response", color="blue"] + } + +This part is actually up to the Policy to enable. The consumer just provides a +thread-safe queue for requests. The :cls:`QueueCallbackThread` can be used by +the Policy implementation to spin up the worker thread to pump the +concurrency-safe queue. See the Pub/Sub subscriber implementation for an +example of this. +""" + +import logging +import queue +import threading + +from google.cloud.pubsub_v1.subscriber import _helper_threads + +_LOGGER = logging.getLogger(__name__) + + +class Consumer(object): + """Bi-directional streaming RPC consumer. + + This class coordinates the consumption of a bi-directional streaming RPC. + There is a bit of background information to know before understanding how + this class operates: + + 1. gRPC has its own background thread for dealing with I/O. + 2. gRPC consumes a streaming call's request generator in another + thread. + 3. If the request generator thread exits, gRPC will close the + connection. + + Because of (2) and (3), the consumer must always at least use threading + for some bookkeeping. No matter what, a thread will be created by gRPC to + generate requests. This thread is called the *request generator thread*. 
+ Having the request generator thread allows the consumer to hold the stream + open indefinitely. Now gRPC will send responses as fast as the consumer can + ask for them. The consumer hands these off to the :cls:`Policy` via + :meth:`Policy.on_response`, which should not block. + + Finally, we do not want to block the main thread, so the consumer actually + invokes the RPC itself in a separate thread. This thread is called the + *response consumer helper thread*. + + So all in all there are three threads: + + 1. gRPC's internal I/O thread. + 2. The request generator thread, created by gRPC. + 3. The response consumer helper thread, created by the Consumer. + + In addition, the Consumer likely uses some sort of concurreny to prevent + blocking on processing responses. The Policy may also use another thread to + deal with pumping messages from an external queue into the request queue + here. + + It may seem strange to use threads for something "high performance" + considering the GIL. However, the threads here are not CPU bound. They are + simple threads that are blocked by I/O and generally just move around some + simple objects between queues. The overhead for these helper threads is + low. The Consumer and end-user can configure any sort of executor they want + for the actual processing of the responses, which may be CPU intensive. + """ + def __init__(self, policy): + """ + Args: + policy (Consumer): The consumer policy, which defines how + requests and responses are handled. + """ + self._policy = policy + self._request_queue = queue.Queue() + self._exiting = threading.Event() + + self.active = False + self.helper_threads = _helper_threads.HelperThreadRegistry() + """:cls:`_helper_threads.HelperThreads`: manages the helper threads. + The policy may use this to schedule its own helper threads. + """ + + def send_request(self, request): + """Queue a request to be sent to gRPC. + + Args: + request (Any): The request protobuf. 
+ """ + self._request_queue.put(request) + + def _request_generator_thread(self): + """Generate requests for the stream. + + This blocks for new requests on the request queue and yields them to + gRPC. + """ + # First, yield the initial request. This occurs on every new + # connection, fundamentally including a resumed connection. + initial_request = self._policy.get_initial_request(ack_queue=True) + _LOGGER.debug('Sending initial request: {initial_request}'.format( + initial_request=initial_request, + )) + yield initial_request + + # Now yield each of the items on the request queue, and block if there + # are none. This can and must block to keep the stream open. + while True: + request = self._request_queue.get() + if request == _helper_threads.STOP: + _LOGGER.debug('Request generator signaled to stop.') + break + + _LOGGER.debug('Sending request: {}'.format(request)) + yield request + + def _blocking_consume(self): + """Consume the stream indefinitely.""" + while True: + # It is possible that a timeout can cause the stream to not + # exit cleanly when the user has called stop_consuming(). This + # checks to make sure we're not exiting before opening a new + # stream. + if self._exiting.is_set(): + _LOGGER.debug('Event signalled consumer exit.') + break + + request_generator = self._request_generator_thread() + response_generator = self._policy.call_rpc(request_generator) + try: + for response in response_generator: + _LOGGER.debug('Received response: {0}'.format(response)) + self._policy.on_response(response) + + # If the loop above exits without an exception, then the + # request stream terminated cleanly, which should only happen + # when it was signaled to do so by stop_consuming. In this + # case, break out of the while loop and exit this thread. 
+ _LOGGER.debug('Clean RPC loop exit signalled consumer exit.') + break + except KeyboardInterrupt: + self.stop_consuming() + except Exception as exc: + try: + self._policy.on_exception(exc) + except: + self.active = False + raise + + def start_consuming(self): + """Start consuming the stream.""" + self.active = True + self._exiting.clear() + self.helper_threads.start( + 'consume bidirectional stream', + self._request_queue, + self._blocking_consume, + ) + + def stop_consuming(self): + """Signal the stream to stop and block until it completes.""" + self.active = False + self._exiting.set() + self.helper_threads.stop_all() diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/_helper_threads.py b/pubsub/google/cloud/pubsub_v1/subscriber/_helper_threads.py new file mode 100644 index 000000000000..21e812a0d2ad --- /dev/null +++ b/pubsub/google/cloud/pubsub_v1/subscriber/_helper_threads.py @@ -0,0 +1,129 @@ +# Copyright 2017, Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import collections +import logging +import threading +import uuid + +import six + +__all__ = ( + 'HelperThreadRegistry', + 'QueueCallbackThread', + 'STOP', +) + +_LOGGER = logging.getLogger(__name__) + +_HelperThread = collections.namedtuple( + 'HelperThreads', + ['name', 'thread', 'queue'], +) + + +# Helper thread stop indicator. 
This could be a sentinel object or None, +# but the sentinel object's ID can change if the process is forked, and +# None has the possibility of a user accidentally killing the helper +# thread. +STOP = uuid.uuid4() + + +class HelperThreadRegistry(object): + def __init__(self): + self._helper_threads = {} + + def __contains__(self, needle): + return needle in self._helper_threads + + def start(self, name, queue, target, *args, **kwargs): + """Create and start a helper thread. + + Args: + name (str): The name of the helper thread. + queue (Queue): A concurrency-safe queue. + target (Callable): The target of the thread. + args: Additional args passed to the thread constructor. + kwargs: Additional kwargs passed to the thread constructor. + + Returns: + threading.Thread: The created thread. + """ + # Create and start the helper thread. + thread = threading.Thread( + name='Consumer helper: {}'.format(name), + target=target, + *args, **kwargs + ) + thread.daemon = True + thread.start() + + # Keep track of the helper thread, so we are able to stop it. + self._helper_threads[name] = _HelperThread(name, thread, queue) + _LOGGER.debug('Started helper thread {}'.format(name)) + return thread + + def stop(self, name): + """Stops a helper thread. + + Sends the stop message and blocks until the thread joins. + + Args: + name (str): The name of the thread. + """ + # Attempt to retrieve the thread; if it is gone already, no-op. + helper_thread = self._helper_threads.get(name) + if helper_thread is None: + return + + # Join the thread if it is still alive. + if helper_thread.thread.is_alive(): + _LOGGER.debug('Stopping helper thread {}'.format(name)) + helper_thread.queue.put(STOP) + helper_thread.thread.join() + + # Remove the thread from our tracking. + self._helper_threads.pop(name, None) + + def stop_all(self): + """Stop all helper threads.""" + # This could be more efficient by sending the stop signal to all + # threads before joining any of them. 
+ for name in list(six.iterkeys(self._helper_threads)): + self.stop(name) + + +class QueueCallbackThread(object): + """A helper thread that executes a callback for every item in + the queue. + """ + def __init__(self, queue, callback): + self.queue = queue + self._callback = callback + + def __call__(self): + while True: + item = self.queue.get() + if item == STOP: + break + + # Run the callback. If any exceptions occur, log them and + # continue. + try: + self._callback(item) + except Exception as exc: + _LOGGER.error('{class_}: {message}'.format( + class_=exc.__class__.__name__, + message=str(exc), + )) diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/_histogram.py b/pubsub/google/cloud/pubsub_v1/subscriber/_histogram.py new file mode 100644 index 000000000000..09f047495896 --- /dev/null +++ b/pubsub/google/cloud/pubsub_v1/subscriber/_histogram.py @@ -0,0 +1,155 @@ +# Copyright 2017, Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import, division + + +class Histogram(object): + """Representation of a single histogram. + + The purpose of this class is to store actual ack timing information + in order to predict how long to renew leases. + + The default implementation uses the 99th percentile of previous ack + times to implicitly lease messages; however, custom + :class:`~.pubsub_v1.subscriber.consumer.base.BaseConsumer` subclasses + are free to use a different formula. 
+ + The precision of data stored is to the nearest integer. Additionally, + values outside the range of ``10 <= x <= 600`` are stored as ``10`` or + ``600``, since these are the boundaries of leases in the actual API. + """ + def __init__(self, data=None): + """Instantiate the histogram. + + Args: + data (Mapping[str, int]): The data structure to be used to store + the underlying data. The default is an empty dictionary. + This can be set to a dictionary-like object if required + (for example, if a special object is needed for + concurrency reasons). + """ + # The data is stored as a dictionary, with the keys being the + # value being added and the values being the number of times that + # value was added to the dictionary. + # + # This depends on the Python interpreter's implicit ordering + # of dictionaries, which is a bitwise sort by the key's ``hash()`` + # value. Because ``hash(int i) -> i`` and all of our keys are + # positive integers (negatives would be a problem because the sort + # is bitwise), we can rely on this. + if data is None: + data = {} + self._data = data + self._len = 0 + + def __len__(self): + """Return the total number of data points in this histogram. + + This is cached on a separate counter (rather than computing it using + ``sum([v for v in self._data.values()])``) to optimize lookup. + + Returns: + int: The total number of data points in this histogram. + """ + return self._len + + def __contains__(self, needle): + """Return True if needle is present in the histogram, False otherwise. + + Returns: + bool: True or False + """ + return needle in self._data + + def __repr__(self): + return ''.format( + len=len(self), + max=self.max, + min=self.min, + ) + + @property + def max(self): + """Return the maximum value in this histogram. + + If there are no values in the histogram at all, return 600. + + Returns: + int: The maximum value in the histogram. 
+ """ + if len(self._data) == 0: + return 600 + return next(iter(reversed(sorted(self._data.keys())))) + + @property + def min(self): + """Return the minimum value in this histogram. + + If there are no values in the histogram at all, return 10. + + Returns: + int: The minimum value in the histogram. + """ + if len(self._data) == 0: + return 10 + return next(iter(sorted(self._data.keys()))) + + def add(self, value): + """Add the value to this histogram. + + Args: + value (int): The value. Values outside of ``10 <= x <= 600`` + will be raised to ``10`` or reduced to ``600``. + """ + # If the value is out of bounds, bring it in bounds. + value = int(value) + if value < 10: + value = 10 + if value > 600: + value = 600 + + # Add the value to the histogram's data dictionary. + self._data.setdefault(value, 0) + self._data[value] += 1 + self._len += 1 + + def percentile(self, percent): + """Return the value that is the Nth precentile in the histogram. + + Args: + percent (Union[int, float]): The precentile being sought. The + default consumer implementations use consistently use ``99``. + + Returns: + int: The value corresponding to the requested percentile. + """ + # Sanity check: Any value over 100 should become 100. + if percent >= 100: + percent = 100 + + # Determine the actual target number. + target = len(self) - len(self) * (percent / 100) + + # Iterate over the values in reverse, dropping the target by the + # number of times each value has been seen. When the target passes + # 0, return the value we are currently viewing. + for k in reversed(sorted(self._data.keys())): + target -= self._data[k] + if target < 0: + return k + + # The only way to get here is if there was no data. + # In this case, just return 10 seconds. 
+ return 10 diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/client.py b/pubsub/google/cloud/pubsub_v1/subscriber/client.py new file mode 100644 index 000000000000..afb9f7d7ca75 --- /dev/null +++ b/pubsub/google/cloud/pubsub_v1/subscriber/client.py @@ -0,0 +1,98 @@ +# Copyright 2017, Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import + +import pkg_resources + +from google.cloud.gapic.pubsub.v1 import subscriber_client + +from google.cloud.pubsub_v1 import _gapic +from google.cloud.pubsub_v1 import types +from google.cloud.pubsub_v1.subscriber.policy import thread + + +__VERSION__ = pkg_resources.get_distribution('google-cloud-pubsub').version + + +@_gapic.add_methods(subscriber_client.SubscriberClient, + blacklist=('pull', 'streaming_pull')) +class Client(object): + """A subscriber client for Google Cloud Pub/Sub. + + This creates an object that is capable of subscribing to messages. + Generally, you can instantiate this client with no arguments, and you + get sensible defaults. + + Args: + policy_class (class): A class that describes how to handle + subscriptions. You may subclass the + :class:`.pubsub_v1.subscriber.policy.base.BasePolicy` + class in order to define your own consumer. This is primarily + provided to allow use of different concurrency models; the default + is based on :class:`threading.Thread`. 
+ kwargs (dict): Any additional arguments provided are sent as + keyword arguments to the underlying + :class:`~.gapic.pubsub.v1.subscriber_client.SubscriberClient`. + Generally, you should not need to set additional keyword + arguments. + """ + def __init__(self, policy_class=thread.Policy, **kwargs): + # Add the metrics headers, and instantiate the underlying GAPIC + # client. + kwargs['lib_name'] = 'gccl' + kwargs['lib_version'] = __VERSION__ + self.api = subscriber_client.SubscriberClient(**kwargs) + + # The subscription class is responsible for retrieving and dispatching + # messages. + self._policy_class = policy_class + + def subscribe(self, subscription, callback=None, flow_control=()): + """Return a representation of an individual subscription. + + This method creates and returns a ``Consumer`` object (that is, a + :class:`~.pubsub_v1.subscriber.consumer.base.BaseConsumer` + subclass) bound to the topic. It does `not` create the subscription + on the backend (or do any API call at all); it simply returns an + object capable of doing these things. + + If the ``callback`` argument is provided, then the :meth:`open` method + is automatically called on the returned object. If ``callback`` is + not provided, the subscription is returned unopened. + + .. note:: + It only makes sense to provide ``callback`` here if you have + already created the subscription manually in the API. + + Args: + subscription (str): The name of the subscription. The + subscription should have already been created (for example, + by using :meth:`create_subscription`). + callback (function): The callback function. This function receives + the :class:`~.pubsub_v1.types.PubsubMessage` as its only + argument. + flow_control (~.pubsub_v1.types.FlowControl): The flow control + settings. Use this to prevent situations where you are + inundated with too many messages at once. 
+ + Returns: + ~.pubsub_v1.subscriber.consumer.base.BaseConsumer: An instance + of the defined ``consumer_class`` on the client. + """ + flow_control = types.FlowControl(*flow_control) + subscr = self._policy_class(self, subscription, flow_control) + if callable(callback): + subscr.open(callback) + return subscr diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/message.py b/pubsub/google/cloud/pubsub_v1/subscriber/message.py new file mode 100644 index 000000000000..1015149cfbbf --- /dev/null +++ b/pubsub/google/cloud/pubsub_v1/subscriber/message.py @@ -0,0 +1,198 @@ +# Copyright 2017, Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import + +import math +import time + + +class Message(object): + """A representation of a single Pub/Sub message. + + The common way to interact with + :class:`~.pubsub_v1.subscriber.message.Message` objects is to receive + them in callbacks on subscriptions; most users should never have a need + to instantiate them by hand. (The exception to this is if you are + implementing a custom subclass to + :class:`~.pubsub_v1.subscriber.consumer.BaseConsumer`.) + + Attributes: + message_id (str): The message ID. In general, you should not need + to use this directly. + data (bytes): The data in the message. Note that this will be a + :class:`bytes`, not a text string. + attributes (dict): The attributes sent along with the message. 
+ publish_time (datetime): The time that this message was originally + published. + """ + def __init__(self, message, ack_id, request_queue): + """Construct the Message. + + .. note:: + + This class should not be constructed directly; it is the + responsibility of :class:`BasePolicy` subclasses to do so. + + Args: + message (~.pubsub_v1.types.PubsubMessage): The message received + from Pub/Sub. + ack_id (str): The ack_id received from Pub/Sub. + request_queue (queue.Queue): A queue provided by the policy that + can accept requests; the policy is responsible for handling + those requests. + """ + self._message = message + self._ack_id = ack_id + self._request_queue = request_queue + self.message_id = message.message_id + + # The instantiation time is the time that this message + # was received. Tracking this provides us a way to be smart about + # the default lease deadline. + self._received_timestamp = time.time() + + # The policy should lease this message, telling PubSub that it has + # it until it is acked or otherwise dropped. + self.lease() + + def __repr__(self): + # Get an abbreviated version of the data. + abbv_data = self._message.data + if len(abbv_data) > 50: + abbv_data = abbv_data[0:50] + b'...' + + # Return a useful representation. + answer = 'Message {\n' + answer += ' data: {0!r}\n'.format(abbv_data) + answer += ' attributes: {0!r}\n'.format(self.attributes) + answer += '}' + return answer + + @property + def attributes(self): + """Return the attributes of the underlying Pub/Sub Message. + + Returns: + dict: The message's attributes. + """ + return self._message.attributes + + @property + def data(self): + """Return the data for the underlying Pub/Sub Message. + + Returns: + bytes: The message data. This is always a bytestring; if you + want a text string, call :meth:`bytes.decode`. + """ + return self._message.data + + @property + def publish_time(self): + """Return the time that the message was originally published. 
+ + Returns: + datetime: The date and time that the message was published. + """ + return self._message.publish_time + + @property + def size(self): + """Return the size of the underlying message, in bytes.""" + return self._message.ByteSize() + + def ack(self): + """Acknowledge the given message. + + Acknowledging a message in Pub/Sub means that you are done + with it, and it will not be delivered to this subscription again. + You should avoid acknowledging messages until you have + *finished* processing them, so that in the event of a failure, + you receive the message again. + + .. warning:: + Acks in Pub/Sub are best effort. You should always + ensure that your processing code is idempotent, as you may + receive any given message more than once. + """ + time_to_ack = math.ceil(time.time() - self._received_timestamp) + self._request_queue.put(('ack', { + 'ack_id': self._ack_id, + 'byte_size': self.size, + 'time_to_ack': time_to_ack, + })) + + def drop(self): + """Release the message from lease management. + + This informs the policy to no longer hold on to the lease for this + message. Pub/Sub will re-deliver the message if it is not acknowledged + before the existing lease expires. + + .. warning:: + For most use cases, the only reason to drop a message from + lease management is on :meth:`ack` or :meth:`nack`; these methods + both call this one. You probably do not want to call this method + directly. + """ + self._request_queue.put(('drop', { + 'ack_id': self._ack_id, + 'byte_size': self.size, + })) + + def lease(self): + """Inform the policy to lease this message continually. + + .. note:: + This method is called by the constructor, and you should never + need to call it manually. + """ + self._request_queue.put(('lease', { + 'ack_id': self._ack_id, + 'byte_size': self.size, + })) + + def modify_ack_deadline(self, seconds): + """Set the deadline for acknowledgement to the given value. 
+
+        The default implementation handles this for you; you should not need
+        to manually deal with setting ack deadlines. The exception case is
+        if you are implementing your own custom subclass of
+        :class:`~.pubsub_v1.subscriber.consumer.BaseConsumer`.
+
+        .. note::
+            This is not an extension; it *sets* the deadline to the given
+            number of seconds from right now. It is even possible to use this
+            method to make a deadline shorter.
+
+        Args:
+            seconds (int): The number of seconds to set the lease deadline
+                to. This should be between 0 and 600. Due to network latency,
+                values below 10 are advised against.
+        """
+        self._request_queue.put(('modify_ack_deadline', {
+            'ack_id': self._ack_id,
+            'seconds': seconds,
+        }))
+
+    def nack(self):
+        """Decline to acknowledge the given message.
+
+        This will cause the message to be re-delivered to the subscription.
+        """
+        self._request_queue.put(('nack', {
+            'ack_id': self._ack_id,
+            'byte_size': self.size,
+        }))
diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/policy/__init__.py b/pubsub/google/cloud/pubsub_v1/subscriber/policy/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py b/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py
new file mode 100644
index 000000000000..85d047eb9439
--- /dev/null
+++ b/pubsub/google/cloud/pubsub_v1/subscriber/policy/base.py
@@ -0,0 +1,392 @@
+# Copyright 2017, Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import, division + +import abc +import logging +import random +import time + +import six + +from google.cloud.pubsub_v1 import types +from google.cloud.pubsub_v1.subscriber import _consumer +from google.cloud.pubsub_v1.subscriber import _histogram + +logger = logging.getLogger(__name__) + + +@six.add_metaclass(abc.ABCMeta) +class BasePolicy(object): + """Abstract class defining a subscription policy. + + Although the :class:`~.pubsub_v1.subscriber.policy.thread.Policy` class, + based on :class:`threading.Thread`, is fine for most cases, + advanced users may need to implement something based on a different + concurrency model. + + This class defines the interface for the policy implementation; + subclasses may be passed as the ``policy_class`` argument to + :class:`~.pubsub_v1.client.SubscriberClient`. + """ + def __init__(self, client, subscription, + flow_control=types.FlowControl(), histogram_data=None): + """Instantiate the policy. + + Args: + client (~.pubsub_v1.subscriber.client): The subscriber client used + to create this instance. + subscription (str): The name of the subscription. The canonical + format for this is + ``projects/{project}/subscriptions/{subscription}``. + flow_control (~.pubsub_v1.types.FlowControl): The flow control + settings. + histogram_data (dict): Optional: A structure to store the histogram + data for predicting appropriate ack times. If set, this should + be a dictionary-like object. + + .. note:: + Additionally, the histogram relies on the assumption + that the dictionary will properly sort keys provided + that all keys are positive integers. If you are sending + your own dictionary class, ensure this assumption holds + or you will get strange behavior. 
+ """ + self._client = client + self._subscription = subscription + self._consumer = _consumer.Consumer(self) + self._ack_deadline = 10 + self._last_histogram_size = 0 + self.flow_control = flow_control + self.histogram = _histogram.Histogram(data=histogram_data) + + # These are for internal flow control tracking. + # They should not need to be used by subclasses. + self._bytes = 0 + self._ack_on_resume = set() + self._paused = False + + @property + def ack_deadline(self): + """Return the appropriate ack deadline. + + This method is "sticky". It will only perform the computations to + check on the right ack deadline if the histogram has gained a + significant amount of new information. + + Returns: + int: The correct ack deadline. + """ + target = min([ + self._last_histogram_size * 2, + self._last_histogram_size + 100, + ]) + if len(self.histogram) > target: + self._ack_deadline = self.histogram.percentile(percent=99) + return self._ack_deadline + + @property + def managed_ack_ids(self): + """Return the ack IDs currently being managed by the policy. + + Returns: + set: The set of ack IDs being managed. + """ + if not hasattr(self, '_managed_ack_ids'): + self._managed_ack_ids = set() + return self._managed_ack_ids + + @property + def subscription(self): + """Return the subscription. + + Returns: + str: The subscription + """ + return self._subscription + + @property + def _load(self): + """Return the current load. + + The load is represented as a float, where 1.0 represents having + hit one of the flow control limits, and values between 0.0 and 1.0 + represent how close we are to them. (0.5 means we have exactly half + of what the flow control setting allows, for example.) + + There are (currently) two flow control settings; this property + computes how close the subscriber is to each of them, and returns + whichever value is higher. (It does not matter that we have lots of + running room on setting A if setting B is over.) + + Returns: + float: The load value. 
+ """ + return max([ + len(self.managed_ack_ids) / self.flow_control.max_messages, + self._bytes / self.flow_control.max_bytes, + ]) + + def ack(self, ack_id, time_to_ack=None, byte_size=None): + """Acknowledge the message corresponding to the given ack_id. + + Args: + ack_id (str): The ack ID. + time_to_ack (int): The time it took to ack the message, measured + from when it was received from the subscription. This is used + to improve the automatic ack timing. + byte_size (int): The size of the PubSub message, in bytes. + """ + # If we got timing information, add it to the histogram. + if time_to_ack is not None: + self.histogram.add(int(time_to_ack)) + + # Send the request to ack the message. + # However, if the consumer is inactive, then queue the ack_id here + # instead; it will be acked as part of the initial request when the + # consumer is started again. + if self._consumer.active: + request = types.StreamingPullRequest(ack_ids=[ack_id]) + self._consumer.send_request(request) + else: + self._ack_on_resume.add(ack_id) + + # Remove the message from lease management. + self.drop(ack_id=ack_id, byte_size=byte_size) + + def call_rpc(self, request_generator): + """Invoke the Pub/Sub streaming pull RPC. + + Args: + request_generator (Generator): A generator that yields requests, + and blocks if there are no outstanding requests (until such + time as there are). + """ + return self._client.api.streaming_pull(request_generator) + + def drop(self, ack_id, byte_size): + """Remove the given ack ID from lease management. + + Args: + ack_id (str): The ack ID. + byte_size (int): The size of the PubSub message, in bytes. + """ + # Remove the ack ID from lease management, and decrement the + # byte counter. + if ack_id in self.managed_ack_ids: + self.managed_ack_ids.remove(ack_id) + self._bytes -= byte_size + self._bytes = min([self._bytes, 0]) + + # If we have been paused by flow control, check and see if we are + # back within our limits. 
+ # + # In order to not thrash too much, require us to have passed below + # the resume threshold (80% by default) of each flow control setting + # before restarting. + if self._paused and self._load < self.flow_control.resume_threshold: + self._paused = False + self.open(self._callback) + + def get_initial_request(self, ack_queue=False): + """Return the initial request. + + This defines the initial request that must always be sent to Pub/Sub + immediately upon opening the subscription. + + Args: + ack_queue (bool): Whether to include any acks that were sent + while the connection was paused. + + Returns: + ~.pubsub_v1.types.StreamingPullRequest: A request suitable + for being the first request on the stream (and not suitable + for any other purpose). + + .. note:: + If ``ack_queue`` is set to True, this includes the ack_ids, but + also clears the internal set. + + This means that calls to :meth:`get_initial_request` with + ``ack_queue`` set to True are not idempotent. + """ + # Any ack IDs that are under lease management and not being acked + # need to have their deadline extended immediately. + ack_ids = set() + lease_ids = self.managed_ack_ids + if ack_queue: + ack_ids = self._ack_on_resume + lease_ids = lease_ids.difference(ack_ids) + + # Put the request together. + request = types.StreamingPullRequest( + ack_ids=list(ack_ids), + modify_deadline_ack_ids=list(lease_ids), + modify_deadline_seconds=[self.ack_deadline] * len(lease_ids), + stream_ack_deadline_seconds=self.histogram.percentile(99), + subscription=self.subscription, + ) + + # Clear the ack_ids set. + # Note: If `ack_queue` is False, this just ends up being a no-op, + # since the set is just an empty set. + ack_ids.clear() + + # Return the initial request. + return request + + def lease(self, ack_id, byte_size): + """Add the given ack ID to lease management. + + Args: + ack_id (str): The ack ID. + byte_size (int): The size of the PubSub message, in bytes. 
+ """ + # Add the ack ID to the set of managed ack IDs, and increment + # the size counter. + if ack_id not in self.managed_ack_ids: + self.managed_ack_ids.add(ack_id) + self._bytes += byte_size + + # Sanity check: Do we have too many things in our inventory? + # If we do, we need to stop the stream. + if self._load >= 1.0: + self._paused = True + self.close() + + def maintain_leases(self): + """Maintain all of the leases being managed by the policy. + + This method modifies the ack deadline for all of the managed + ack IDs, then waits for most of that time (but with jitter), and + then calls itself. + + .. warning:: + This method blocks, and generally should be run in a separate + thread or process. + + Additionally, you should not have to call this method yourself, + unless you are implementing your own policy. If you are + implementing your own policy, you _should_ call this method + in an appropriate form of subprocess. + """ + while True: + # Sanity check: Should this infinitely loop quit? + if not self._consumer.active: + return + + # Determine the appropriate duration for the lease. This is + # based off of how long previous messages have taken to ack, with + # a sensible default and within the ranges allowed by Pub/Sub. + p99 = self.histogram.percentile(99) + logger.debug('The current p99 value is %d seconds.' % p99) + + # Create a streaming pull request. + # We do not actually call `modify_ack_deadline` over and over + # because it is more efficient to make a single request. + ack_ids = list(self.managed_ack_ids) + logger.debug('Renewing lease for %d ack IDs.' % len(ack_ids)) + if len(ack_ids) > 0 and self._consumer.active: + request = types.StreamingPullRequest( + modify_deadline_ack_ids=ack_ids, + modify_deadline_seconds=[p99] * len(ack_ids), + ) + self._consumer.send_request(request) + + # Now wait an appropriate period of time and do this again. 
+ # + # We determine the appropriate period of time based on a random + # period between 0 seconds and 90% of the lease. This use of + # jitter (http://bit.ly/2s2ekL7) helps decrease contention in cases + # where there are many clients. + snooze = random.uniform(0.0, p99 * 0.9) + logger.debug('Snoozing lease management for %f seconds.' % snooze) + time.sleep(snooze) + + def modify_ack_deadline(self, ack_id, seconds): + """Modify the ack deadline for the given ack_id. + + Args: + ack_id (str): The ack ID + seconds (int): The number of seconds to set the new deadline to. + """ + request = types.StreamingPullRequest( + modify_deadline_ack_ids=[ack_id], + modify_deadline_seconds=[seconds], + ) + self._consumer.send_request(request) + + def nack(self, ack_id, byte_size=None): + """Explicitly deny receipt of a message. + + Args: + ack_id (str): The ack ID. + byte_size (int): The size of the PubSub message, in bytes. + """ + self.modify_ack_deadline(ack_id=ack_id, seconds=0) + self.drop(ack_id=ack_id, byte_size=byte_size) + + @abc.abstractmethod + def close(self): + """Close the existing connection.""" + raise NotImplementedError + + @abc.abstractmethod + def on_exception(self, exception): + """Called when a gRPC exception occurs. + + If this method does nothing, then the stream is re-started. If this + raises an exception, it will stop the consumer thread. + This is executed on the response consumer helper thread. + + Args: + exception (Exception): The exception raised by the RPC. + """ + raise NotImplementedError + + @abc.abstractmethod + def on_response(self, response): + """Process a response from gRPC. + + This gives the consumer control over how responses are scheduled to + be processed. This method is expected to not block and instead + schedule the response to be consumed by some sort of concurrency. 
+
+        For example, if the Policy implementation takes a callback in its
+        constructor, you can schedule the callback using a
+        :class:`concurrent.futures.ThreadPoolExecutor`::
+
+            self._pool.submit(self._callback, response)
+
+        This is called from the response consumer helper thread.
+
+        Args:
+            response (Any): The protobuf response from the RPC.
+        """
+        raise NotImplementedError
+
+    @abc.abstractmethod
+    def open(self, callback):
+        """Open a streaming pull connection and begin receiving messages.
+
+        For each message received, the ``callback`` function is fired with
+        a :class:`~.pubsub_v1.subscriber.message.Message` as its only
+        argument.
+
+        Args:
+            callback (Callable[Message]): A callable that receives a
+                Pub/Sub Message.
+        """
+        raise NotImplementedError
diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py b/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py
new file mode 100644
index 000000000000..df0f965748de
--- /dev/null
+++ b/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py
@@ -0,0 +1,147 @@
+# Copyright 2017, Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +from __future__ import absolute_import + +from concurrent import futures +from queue import Queue +import logging +import threading + +import grpc + +from google.cloud.pubsub_v1 import types +from google.cloud.pubsub_v1.subscriber import _helper_threads +from google.cloud.pubsub_v1.subscriber.policy import base +from google.cloud.pubsub_v1.subscriber.message import Message + + +logger = logging.getLogger(__name__) + + +class Policy(base.BasePolicy): + """A consumer class based on :class:`threading.Thread`. + + This consumer handles the connection to the Pub/Sub service and all of + the concurrency needs. + """ + def __init__(self, client, subscription, flow_control=types.FlowControl(), + executor=None, queue=None): + """Instantiate the policy. + + Args: + client (~.pubsub_v1.subscriber.client): The subscriber client used + to create this instance. + subscription (str): The name of the subscription. The canonical + format for this is + ``projects/{project}/subscriptions/{subscription}``. + flow_control (~google.cloud.pubsub_v1.types.FlowControl): The flow + control settings. + executor (~concurrent.futures.ThreadPoolExecutor): (Optional.) A + ThreadPoolExecutor instance, or anything duck-type compatible + with it. + queue (~queue.Queue): (Optional.) A Queue instance, appropriate + for crossing the concurrency boundary implemented by + ``executor``. + """ + # Default the callback to a no-op; it is provided by `.open`. + self._callback = lambda message: None + + # Create a queue for keeping track of shared state. + if queue is None: + queue = Queue() + self._request_queue = Queue() + + # Call the superclass constructor. + super(Policy, self).__init__( + client=client, + flow_control=flow_control, + subscription=subscription, + ) + + # Also maintain a request queue and an executor. 
+ logger.debug('Creating callback requests thread (not starting).') + if executor is None: + executor = futures.ThreadPoolExecutor(max_workers=10) + self._executor = executor + self._callback_requests = _helper_threads.QueueCallbackThread( + self._request_queue, + self.on_callback_request, + ) + + def close(self): + """Close the existing connection.""" + # Close the main subscription connection. + self._consumer.helper_threads.stop('callback requests worker') + self._consumer.stop_consuming() + + def open(self, callback): + """Open a streaming pull connection and begin receiving messages. + + For each message received, the ``callback`` function is fired with + a :class:`~.pubsub_v1.subscriber.message.Message` as its only + argument. + + Args: + callback (Callable): The callback function. + """ + # Start the thread to pass the requests. + logger.debug('Starting callback requests worker.') + self._callback = callback + self._consumer.helper_threads.start( + 'callback requests worker', + self._request_queue, + self._callback_requests, + ) + + # Actually start consuming messages. + self._consumer.start_consuming() + + # Spawn a helper thread that maintains all of the leases for + # this policy. + logger.debug('Spawning lease maintenance worker.') + self._leaser = threading.Thread(target=self.maintain_leases) + self._leaser.daemon = True + self._leaser.start() + + def on_callback_request(self, callback_request): + """Map the callback request to the appropriate GRPC request.""" + action, kwargs = callback_request[0], callback_request[1] + getattr(self, action)(**kwargs) + + def on_exception(self, exception): + """Bubble the exception. + + This will cause the stream to exit loudly. + """ + # If this is DEADLINE_EXCEEDED, then we want to retry. + # That entails just returning None. + deadline_exceeded = grpc.StatusCode.DEADLINE_EXCEEDED + if getattr(exception, 'code', lambda: None)() == deadline_exceeded: + return + + # Raise any other exception. 
+ raise exception + + def on_response(self, response): + """Process all received Pub/Sub messages. + + For each message, schedule a callback with the executor. + """ + for msg in response.received_messages: + logger.debug('New message received from Pub/Sub: %r', msg) + logger.debug(self._callback) + message = Message(msg.message, msg.ack_id, self._request_queue) + future = self._executor.submit(self._callback, message) + logger.debug('Result: %s' % future.result()) diff --git a/pubsub/google/cloud/pubsub_v1/types.py b/pubsub/google/cloud/pubsub_v1/types.py index 98677f0b537f..a9de4a88f7f8 100644 --- a/pubsub/google/cloud/pubsub_v1/types.py +++ b/pubsub/google/cloud/pubsub_v1/types.py @@ -21,6 +21,7 @@ from google.cloud.proto.pubsub.v1 import pubsub_pb2 from google.gax.utils.messages import get_messages +from google.protobuf import timestamp_pb2 # Define the default values for batching. @@ -28,12 +29,13 @@ # This class is used when creating a publisher or subscriber client, and # these settings can be altered to tweak Pub/Sub behavior. # The defaults should be fine for most use cases. -BatchSettings = collections.namedtuple('BatchSettings', +BatchSettings = collections.namedtuple( + 'BatchSettings', ['max_bytes', 'max_latency', 'max_messages'], ) BatchSettings.__new__.__defaults__ = ( 1024 * 1024 * 5, # max_bytes: 5 MB - 0.25, # max_latency: 0.25 seconds + 0.05, # max_latency: 0.05 seconds 1000, # max_messages: 1,000 ) @@ -42,17 +44,25 @@ # This class is used when creating a publisher or subscriber client, and # these settings can be altered to tweak Pub/Sub behavior. # The defaults should be fine for most use cases. 
-FlowControl = collections.namedtuple('FlowControl', - ['max_bytes', 'max_messages'], +FlowControl = collections.namedtuple( + 'FlowControl', + ['max_bytes', 'max_messages', 'resume_threshold'], ) FlowControl.__new__.__defaults__ = ( psutil.virtual_memory().total * 0.2, # max_bytes: 20% of total RAM float('inf'), # max_messages: no limit + 0.8, # resume_threshold: 80% ) -_names = ['BatchSettings', 'FlowControl'] +# Pub/Sub uses timestamps from the common protobuf package. +# Do not make users import from there. +Timestamp = timestamp_pb2.Timestamp + + +_names = ['BatchSettings', 'FlowControl', 'Timestamp'] for name, message in get_messages(pubsub_pb2).items(): + message.__module__ = 'google.cloud.pubsub_v1.types' setattr(sys.modules[__name__], name, message) _names.append(name) diff --git a/pubsub/nox.py b/pubsub/nox.py index 4bcecafe66b4..c860e0741fe6 100644 --- a/pubsub/nox.py +++ b/pubsub/nox.py @@ -38,10 +38,10 @@ def unit_tests(session, python_version): session.install('-e', '.') # Run py.test against the unit tests. - session.run('py.test', '--quiet', - '--cov=google.cloud.pubsub', '--cov=tests.unit', '--cov-append', - '--cov-config=.coveragerc', '--cov-report=', '--cov-fail-under=97', - 'tests/unit', + session.run( + 'py.test', '--quiet', '--cov-append', '--cov-report=', + '--cov=google.cloud.pubsub', '--cov=google.cloud.pubsub_v1', + '--cov-config=.coveragerc', 'tests/unit', ) @@ -87,7 +87,8 @@ def lint(session): '--library-filesets', 'google', '--test-filesets', 'tests', # Temporarily allow this to fail. 
- success_codes=range(0, 100)) + success_codes=range(0, 100), + ) @nox.session diff --git a/pubsub/setup.py b/pubsub/setup.py index 1899896ece21..91bbeb8e2a8c 100644 --- a/pubsub/setup.py +++ b/pubsub/setup.py @@ -60,7 +60,7 @@ setup( name='google-cloud-pubsub', - version='0.26.0', + version='0.27.0', description='Python Client for Google Cloud Pub/Sub', long_description=README, namespace_packages=[ diff --git a/pubsub/tests/system.py b/pubsub/tests/system.py new file mode 100644 index 000000000000..02666eae676a --- /dev/null +++ b/pubsub/tests/system.py @@ -0,0 +1,106 @@ +# Copyright 2017, Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import + +import time +import uuid + +import mock +import six + +from google import auth +from google.cloud import pubsub_v1 + + +def _resource_name(resource_type): + """Return a randomly selected name for a resource. + + Args: + resource_type (str): The resource for which a name is being + generated. Should be singular (e.g. 
"topic", "subscription") + """ + return 'projects/{project}/{resource_type}s/st-n{random}'.format( + project=auth.default()[1], + random=str(uuid.uuid4())[0:8], + resource_type=resource_type, + ) + + +def test_publish_messages(): + publisher = pubsub_v1.PublisherClient() + topic_name = _resource_name('topic') + futures = [] + + try: + publisher.create_topic(topic_name) + for i in range(0, 500): + futures.append( + publisher.publish( + topic_name, + b'The hail in Wales falls mainly on the snails.', + num=str(i), + ), + ) + for future in futures: + result = future.result() + assert isinstance(result, (six.text_type, six.binary_type)) + finally: + publisher.delete_topic(topic_name) + + +def test_subscribe_to_messages(): + publisher = pubsub_v1.PublisherClient() + subscriber = pubsub_v1.SubscriberClient() + topic_name = _resource_name('topic') + sub_name = _resource_name('subscription') + + try: + # Create a topic. + publisher.create_topic(topic_name) + + # Subscribe to the topic. This must happen before the messages + # are published. + subscriber.create_subscription(sub_name, topic_name) + subscription = subscriber.subscribe(sub_name) + + # Publish some messages. + futures = [publisher.publish( + topic_name, + b'Wooooo! The claaaaaw!', + num=str(i), + ) for i in range(0, 50)] + + # Make sure the publish completes. + [f.result() for f in futures] + + # The callback should process the message numbers to prove + # that we got everything at least once. + callback = mock.Mock(wraps=lambda message: message.ack()) + + # Actually open the subscription and hold it open for a few seconds. + subscription.open(callback) + for second in range(0, 10): + time.sleep(1) + + # The callback should have fired at least fifty times, but it + # may take some time. + if callback.call_count >= 50: + return + + # Okay, we took too long; fail out. 
+ assert callback.call_count >= 50 + finally: + publisher.delete_topic(topic_name) + subscriber.delete_subscription(sub_name) diff --git a/pubsub/tests/unit/__init__.py b/pubsub/tests/unit/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/pubsub/tests/unit/pubsub_v1/publisher/batch/test_base.py b/pubsub/tests/unit/pubsub_v1/publisher/batch/test_base.py new file mode 100644 index 000000000000..05a749d58425 --- /dev/null +++ b/pubsub/tests/unit/pubsub_v1/publisher/batch/test_base.py @@ -0,0 +1,69 @@ +# Copyright 2017, Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import + +import mock + +from google.auth import credentials +from google.cloud.pubsub_v1 import publisher +from google.cloud.pubsub_v1 import types +from google.cloud.pubsub_v1.publisher.batch.base import BatchStatus +from google.cloud.pubsub_v1.publisher.batch.thread import Batch + + +def create_batch(status=None, settings=types.BatchSettings()): + """Create a batch object, which does not commit. + + Args: + status (str): If provided, the batch's internal status will be set + to the provided status. 
+ + Returns: + ~.pubsub_v1.publisher.batch.thread.Batch: The batch object + """ + creds = mock.Mock(spec=credentials.Credentials) + client = publisher.Client(credentials=creds) + batch = Batch(client, 'topic_name', settings, autocommit=False) + if status: + batch._status = status + return batch + + +def test_len(): + batch = create_batch(status=BatchStatus.ACCEPTING_MESSAGES) + assert len(batch) == 0 + batch.publish(types.PubsubMessage(data=b'foo')) + assert len(batch) == 1 + + +def test_will_accept(): + batch = create_batch(status=BatchStatus.ACCEPTING_MESSAGES) + message = types.PubsubMessage() + assert batch.will_accept(message) is True + + +def test_will_not_accept_status(): + batch = create_batch(status='talk to the hand') + message = types.PubsubMessage() + assert batch.will_accept(message) is False + + +def test_will_not_accept_size(): + batch = create_batch( + settings=types.BatchSettings(max_bytes=10), + status=BatchStatus.ACCEPTING_MESSAGES, + ) + message = types.PubsubMessage(data=b'abcdefghijklmnopqrstuvwxyz') + assert batch.will_accept(message) is False diff --git a/pubsub/tests/unit/pubsub_v1/publisher/batch/test_thread.py b/pubsub/tests/unit/pubsub_v1/publisher/batch/test_thread.py new file mode 100644 index 000000000000..00b761f52b96 --- /dev/null +++ b/pubsub/tests/unit/pubsub_v1/publisher/batch/test_thread.py @@ -0,0 +1,204 @@ +# Copyright 2017, Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import threading +import time + +import mock + +from google.auth import credentials +from google.cloud.pubsub_v1 import publisher +from google.cloud.pubsub_v1 import types +from google.cloud.pubsub_v1.publisher import exceptions +from google.cloud.pubsub_v1.publisher.batch.base import BatchStatus +from google.cloud.pubsub_v1.publisher.batch.thread import Batch + + +def create_client(): + creds = mock.Mock(spec=credentials.Credentials) + return publisher.Client(credentials=creds) + + +def create_batch(autocommit=False, **batch_settings): + """Return a batch object suitable for testing. + + Args: + autocommit (bool): Whether the batch should commit after + ``max_latency`` seconds. By default, this is ``False`` + for unit testing. + kwargs (dict): Arguments passed on to the + :class:``~.pubsub_v1.types.BatchSettings`` constructor. + + Returns: + ~.pubsub_v1.publisher.batch.thread.Batch: A batch object. + """ + client = create_client() + settings = types.BatchSettings(**batch_settings) + return Batch(client, 'topic_name', settings, autocommit=autocommit) + + +def test_init(): + """Establish that a monitor thread is usually created on init.""" + client = create_client() + + # Do not actually create a thread, but do verify that one was created; + # it should be running the batch's "monitor" method (which commits the + # batch once time elapses). + with mock.patch.object(threading, 'Thread', autospec=True) as Thread: + batch = Batch(client, 'topic_name', types.BatchSettings()) + Thread.assert_called_once_with(target=batch.monitor) + + # New batches start able to accept messages by default. 
+ assert batch.status == BatchStatus.ACCEPTING_MESSAGES + + +def test_init_infinite_latency(): + batch = create_batch(max_latency=float('inf')) + assert batch._thread is None + + +def test_client(): + client = create_client() + settings = types.BatchSettings() + batch = Batch(client, 'topic_name', settings, autocommit=False) + assert batch.client is client + + +def test_commit(): + batch = create_batch() + with mock.patch.object(threading, 'Thread', autospec=True) as Thread: + batch.commit() + + # A thread should have been created to do the actual commit. + Thread.assert_called_once_with(target=batch._commit) + Thread.return_value.start.assert_called_once_with() + + # The batch's status needs to be something other than "accepting messages", + # since the commit started. + assert batch.status != BatchStatus.ACCEPTING_MESSAGES + + +def test_blocking_commit(): + batch = create_batch() + futures = ( + batch.publish({'data': b'This is my message.'}), + batch.publish({'data': b'This is another message.'}), + ) + + # Set up the underlying API publish method to return a PublishResponse. + with mock.patch.object(type(batch.client.api), 'publish') as publish: + publish.return_value = types.PublishResponse(message_ids=['a', 'b']) + + # Actually commit the batch. + batch._commit() + + # Establish that the underlying API call was made with expected + # arguments. + publish.assert_called_once_with('topic_name', [ + types.PubsubMessage(data=b'This is my message.'), + types.PubsubMessage(data=b'This is another message.'), + ]) + + # Establish that all of the futures are done, and that they have the + # expected values. 
+ assert all([f.done() for f in futures]) + assert futures[0].result() == 'a' + assert futures[1].result() == 'b' + + +def test_blocking_commit_no_messages(): + batch = create_batch() + with mock.patch.object(type(batch.client.api), 'publish') as publish: + batch._commit() + assert publish.call_count == 0 + + +def test_blocking_commit_wrong_messageid_length(): + batch = create_batch() + futures = ( + batch.publish({'data': b'blah blah blah'}), + batch.publish({'data': b'blah blah blah blah'}), + ) + + # Set up a PublishResponse that only returns one message ID. + with mock.patch.object(type(batch.client.api), 'publish') as publish: + publish.return_value = types.PublishResponse(message_ids=['a']) + batch._commit() + for future in futures: + assert future.done() + assert isinstance(future.exception(), exceptions.PublishError) + + +def test_monitor(): + batch = create_batch(max_latency=5.0) + with mock.patch.object(time, 'sleep') as sleep: + with mock.patch.object(type(batch), '_commit') as _commit: + batch.monitor() + + # The monitor should have waited the given latency. + sleep.assert_called_once_with(5.0) + + # Since `monitor` runs in its own thread, it should call + # the blocking commit implementation. + _commit.assert_called_once_with() + + +def test_monitor_already_committed(): + batch = create_batch(max_latency=5.0) + batch._status = 'something else' + with mock.patch.object(time, 'sleep') as sleep: + batch.monitor() + + # The monitor should have waited the given latency. + sleep.assert_called_once_with(5.0) + + # The status should not have changed. + assert batch._status == 'something else' + + +def test_publish(): + batch = create_batch() + messages = ( + types.PubsubMessage(data=b'foobarbaz'), + types.PubsubMessage(data=b'spameggs'), + types.PubsubMessage(data=b'1335020400'), + ) + + # Publish each of the messages, which should save them to the batch. 
+ for message in messages: + batch.publish(message) + + # There should be three messages on the batch, and three futures. + assert len(batch.messages) == 3 + assert len(batch._futures) == 3 + + # The size should have been incremented by the sum of the size of the + # messages. + assert batch.size == sum([m.ByteSize() for m in messages]) + assert batch.size > 0 # I do not always trust protobuf. + + +def test_publish_dict(): + batch = create_batch() + batch.publish({'data': b'foobarbaz', 'attributes': {'spam': 'eggs'}}) + + # There should be one message on the batch. + assert len(batch.messages) == 1 + + # It should be an actual protobuf Message at this point, with the + # expected values. + message = batch.messages[0] + assert isinstance(message, types.PubsubMessage) + assert message.data == b'foobarbaz' + assert message.attributes == {'spam': 'eggs'} diff --git a/pubsub/tests/unit/pubsub_v1/publisher/test_futures.py b/pubsub/tests/unit/pubsub_v1/publisher/test_futures.py new file mode 100644 index 000000000000..e9b64a202e94 --- /dev/null +++ b/pubsub/tests/unit/pubsub_v1/publisher/test_futures.py @@ -0,0 +1,118 @@ +# Copyright 2017, Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import mock + +import pytest + +from google.cloud.pubsub_v1.publisher import exceptions +from google.cloud.pubsub_v1.publisher.futures import Future + + +def test_cancel(): + assert Future().cancel() is False + + +def test_cancelled(): + assert Future().cancelled() is False + + +def test_running(): + assert Future().running() is True + + +def test_done(): + future = Future() + assert future.done() is False + future.set_result('12345') + assert future.done() is True + + +def test_exception_no_error(): + future = Future() + future.set_result('12345') + assert future.exception() is None + + +def test_exception_with_error(): + future = Future() + error = RuntimeError('Something really bad happened.') + future.set_exception(error) + + # Make sure that the exception that is returned is the batch's error. + # Also check the type to ensure the batch's error did not somehow + # change internally. + assert future.exception() is error + assert isinstance(future.exception(), RuntimeError) + with pytest.raises(RuntimeError): + future.result() + + +def test_exception_timeout(): + future = Future() + with pytest.raises(exceptions.TimeoutError): + future.exception(timeout=0.01) + + +def test_result_no_error(): + future = Future() + future.set_result('42') + assert future.result() == '42' + + +def test_result_with_error(): + future = Future() + future.set_exception(RuntimeError('Something really bad happened.')) + with pytest.raises(RuntimeError): + future.result() + + +def test_add_done_callback_pending_batch(): + future = Future() + callback = mock.Mock() + future.add_done_callback(callback) + assert len(future._callbacks) == 1 + assert callback in future._callbacks + assert callback.call_count == 0 + + +def test_add_done_callback_completed_batch(): + future = Future() + future.set_result('12345') + callback = mock.Mock(spec=()) + future.add_done_callback(callback) + callback.assert_called_once_with(future) + + +def test_trigger(): + future = Future() + callback = 
mock.Mock(spec=()) + future.add_done_callback(callback) + assert callback.call_count == 0 + future.set_result('12345') + callback.assert_called_once_with(future) + + +def test_set_result_once_only(): + future = Future() + future.set_result('12345') + with pytest.raises(RuntimeError): + future.set_result('67890') + + +def test_set_exception_once_only(): + future = Future() + future.set_exception(ValueError('wah wah')) + with pytest.raises(RuntimeError): + future.set_exception(TypeError('other wah wah')) diff --git a/pubsub/tests/unit/pubsub_v1/publisher/test_publisher_client.py b/pubsub/tests/unit/pubsub_v1/publisher/test_publisher_client.py new file mode 100644 index 000000000000..0054b25262b5 --- /dev/null +++ b/pubsub/tests/unit/pubsub_v1/publisher/test_publisher_client.py @@ -0,0 +1,143 @@ +# Copyright 2017, Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import mock + +import pytest + +from google.auth import credentials +from google.cloud.gapic.pubsub.v1 import publisher_client +from google.cloud.pubsub_v1 import publisher +from google.cloud.pubsub_v1 import types + + +def create_client(): + creds = mock.Mock(spec=credentials.Credentials) + return publisher.Client(credentials=creds) + + +def test_init(): + client = create_client() + + # A plain client should have an `api` (the underlying GAPIC) and a + # batch settings object, which should have the defaults. 
+ assert isinstance(client.api, publisher_client.PublisherClient) + assert client.batch_settings.max_bytes == 5 * (2 ** 20) + assert client.batch_settings.max_latency == 0.05 + assert client.batch_settings.max_messages == 1000 + + +def test_batch_accepting(): + """Establish that an existing batch is returned if it accepts messages.""" + client = create_client() + message = types.PubsubMessage(data=b'foo') + + # At first, there are no batches, so this should return a new batch + # which is also saved to the object. + ante = len(client._batches) + batch = client.batch('topic_name', message, autocommit=False) + assert len(client._batches) == ante + 1 + assert batch is client._batches['topic_name'] + + # A subsequent request should return the same batch. + batch2 = client.batch('topic_name', message, autocommit=False) + assert batch is batch2 + assert batch2 is client._batches['topic_name'] + + +def test_batch_without_autocreate(): + client = create_client() + message = types.PubsubMessage(data=b'foo') + + # If `create=False` is sent, then when the batch is not found, None + # is returned instead. + ante = len(client._batches) + batch = client.batch('topic_name', message, create=False) + assert batch is None + assert len(client._batches) == ante + + +def test_publish(): + client = create_client() + + # Use a mock in lieu of the actual batch class; set the mock up to claim + # indiscriminately that it accepts all messages. + batch = mock.Mock(spec=client._batch_class) + batch.will_accept.return_value = True + client._batches['topic_name'] = batch + + # Begin publishing. + client.publish('topic_name', b'spam') + client.publish('topic_name', b'foo', bar='baz') + + # The batch's publish method should have been called twice. + assert batch.publish.call_count == 2 + + # In both cases + # The first call should correspond to the first message. 
+ _, args, _ = batch.publish.mock_calls[0] + assert args[0].data == b'spam' + assert not args[0].attributes + + # The second call should correspond to the second message. + _, args, _ = batch.publish.mock_calls[1] + assert args[0].data == b'foo' + assert args[0].attributes == {u'bar': u'baz'} + + +def test_publish_data_not_bytestring_error(): + client = create_client() + with pytest.raises(TypeError): + client.publish('topic_name', u'This is a text string.') + with pytest.raises(TypeError): + client.publish('topic_name', 42) + + +def test_publish_attrs_bytestring(): + client = create_client() + + # Use a mock in lieu of the actual batch class; set the mock up to claim + # indiscriminately that it accepts all messages. + batch = mock.Mock(spec=client._batch_class) + batch.will_accept.return_value = True + client._batches['topic_name'] = batch + + # Begin publishing. + client.publish('topic_name', b'foo', bar=b'baz') + + # The attributes should have been sent as text. + _, args, _ = batch.publish.mock_calls[0] + assert args[0].data == b'foo' + assert args[0].attributes == {u'bar': u'baz'} + + +def test_publish_attrs_type_error(): + client = create_client() + with pytest.raises(TypeError): + client.publish('topic_name', b'foo', answer=42) + + +def test_gapic_instance_method(): + client = create_client() + with mock.patch.object(client.api, '_create_topic', autospec=True) as ct: + client.create_topic('projects/foo/topics/bar') + assert ct.call_count == 1 + _, args, _ = ct.mock_calls[0] + assert args[0] == types.Topic(name='projects/foo/topics/bar') + + +def test_gapic_class_method(): + client = create_client() + answer = client.topic_path('foo', 'bar') + assert answer == 'projects/foo/topics/bar' diff --git a/pubsub/tests/unit/pubsub_v1/subscriber/test_consumer.py b/pubsub/tests/unit/pubsub_v1/subscriber/test_consumer.py new file mode 100644 index 000000000000..2a3429fbc5b3 --- /dev/null +++ b/pubsub/tests/unit/pubsub_v1/subscriber/test_consumer.py @@ -0,0 +1,117 @@ +# 
Copyright 2017, Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import queue + +import mock + +import pytest + +from google.auth import credentials +from google.cloud.pubsub_v1 import subscriber +from google.cloud.pubsub_v1 import types +from google.cloud.pubsub_v1.subscriber import _consumer +from google.cloud.pubsub_v1.subscriber import _helper_threads +from google.cloud.pubsub_v1.subscriber.policy import thread + + +def create_consumer(): + creds = mock.Mock(spec=credentials.Credentials) + client = subscriber.Client(credentials=creds) + subscription = client.subscribe('sub_name_e') + return _consumer.Consumer(policy=subscription) + + +def test_send_request(): + consumer = create_consumer() + request = types.StreamingPullRequest(subscription='foo') + with mock.patch.object(queue.Queue, 'put') as put: + consumer.send_request(request) + put.assert_called_once_with(request) + + +def test_request_generator_thread(): + consumer = create_consumer() + generator = consumer._request_generator_thread() + + # The first request that comes from the request generator thread + # should always be the initial request. + initial_request = next(generator) + assert initial_request.subscription == 'sub_name_e' + assert initial_request.stream_ack_deadline_seconds == 10 + + # Subsequent requests correspond to items placed in the request queue. 
+ consumer.send_request(types.StreamingPullRequest(ack_ids=['i'])) + request = next(generator) + assert request.ack_ids == ['i'] + + # The poison pill should stop the loop. + consumer.send_request(_helper_threads.STOP) + with pytest.raises(StopIteration): + next(generator) + + +def test_blocking_consume(): + consumer = create_consumer() + Policy = type(consumer._policy) + + # Establish that we get responses until we run out of them. + with mock.patch.object(Policy, 'call_rpc', autospec=True) as call_rpc: + call_rpc.return_value = (mock.sentinel.A, mock.sentinel.B) + with mock.patch.object(Policy, 'on_response', autospec=True) as on_res: + consumer._blocking_consume() + assert on_res.call_count == 2 + assert on_res.mock_calls[0][1][1] == mock.sentinel.A + assert on_res.mock_calls[1][1][1] == mock.sentinel.B + + +def test_blocking_consume_keyboard_interrupt(): + consumer = create_consumer() + Policy = type(consumer._policy) + + # Establish that we get responses until we are sent the exiting event. + with mock.patch.object(Policy, 'call_rpc', autospec=True) as call_rpc: + call_rpc.return_value = (mock.sentinel.A, mock.sentinel.B) + with mock.patch.object(Policy, 'on_response', autospec=True) as on_res: + on_res.side_effect = KeyboardInterrupt + consumer._blocking_consume() + on_res.assert_called_once_with(consumer._policy, mock.sentinel.A) + + +@mock.patch.object(thread.Policy, 'call_rpc', autospec=True) +@mock.patch.object(thread.Policy, 'on_response', autospec=True) +@mock.patch.object(thread.Policy, 'on_exception', autospec=True) +def test_blocking_consume_exception_reraise(on_exc, on_res, call_rpc): + consumer = create_consumer() + + # Establish that we get responses until we are sent the exiting event. 
+ call_rpc.return_value = (mock.sentinel.A, mock.sentinel.B) + on_res.side_effect = TypeError('Bad things!') + on_exc.side_effect = on_res.side_effect + with pytest.raises(TypeError): + consumer._blocking_consume() + + +def test_start_consuming(): + consumer = create_consumer() + helper_threads = consumer.helper_threads + with mock.patch.object(helper_threads, 'start', autospec=True) as start: + consumer.start_consuming() + assert consumer._exiting.is_set() is False + assert consumer.active is True + start.assert_called_once_with( + 'consume bidirectional stream', + consumer._request_queue, + consumer._blocking_consume, + ) diff --git a/pubsub/tests/unit/pubsub_v1/subscriber/test_helper_threads.py b/pubsub/tests/unit/pubsub_v1/subscriber/test_helper_threads.py new file mode 100644 index 000000000000..84775f0be2c1 --- /dev/null +++ b/pubsub/tests/unit/pubsub_v1/subscriber/test_helper_threads.py @@ -0,0 +1,125 @@ +# Copyright 2017, Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import queue +import threading + +import mock + +from google.cloud.pubsub_v1.subscriber import _helper_threads + + +def test_start(): + registry = _helper_threads.HelperThreadRegistry() + queue_ = queue.Queue() + target = mock.Mock(spec=()) + with mock.patch.object(threading.Thread, 'start', autospec=True) as start: + registry.start('foo', queue_, target) + assert start.called + + +def test_stop_noop(): + registry = _helper_threads.HelperThreadRegistry() + assert len(registry._helper_threads) == 0 + registry.stop('foo') + assert len(registry._helper_threads) == 0 + + +def test_stop_dead_thread(): + registry = _helper_threads.HelperThreadRegistry() + registry._helper_threads['foo'] = _helper_threads._HelperThread( + name='foo', + queue=None, + thread=threading.Thread(target=lambda: None), + ) + assert len(registry._helper_threads) == 1 + registry.stop('foo') + assert len(registry._helper_threads) == 0 + + +@mock.patch.object(queue.Queue, 'put') +@mock.patch.object(threading.Thread, 'is_alive') +@mock.patch.object(threading.Thread, 'join') +def test_stop_alive_thread(join, is_alive, put): + is_alive.return_value = True + + # Set up a registry with a helper thread in it. + registry = _helper_threads.HelperThreadRegistry() + registry._helper_threads['foo'] = _helper_threads._HelperThread( + name='foo', + queue=queue.Queue(), + thread=threading.Thread(target=lambda: None), + ) + + # Assert that the helper thread is present, and removed correctly + # on stop. + assert len(registry._helper_threads) == 1 + registry.stop('foo') + assert len(registry._helper_threads) == 0 + + # Assert that all of our mocks were called in the expected manner. 
+ is_alive.assert_called_once_with() + join.assert_called_once_with() + put.assert_called_once_with(_helper_threads.STOP) + + +def test_stop_all(): + registry = _helper_threads.HelperThreadRegistry() + registry._helper_threads['foo'] = _helper_threads._HelperThread( + name='foo', + queue=None, + thread=threading.Thread(target=lambda: None), + ) + assert len(registry._helper_threads) == 1 + registry.stop_all() + assert len(registry._helper_threads) == 0 + + +def test_stop_all_noop(): + registry = _helper_threads.HelperThreadRegistry() + assert len(registry._helper_threads) == 0 + registry.stop_all() + assert len(registry._helper_threads) == 0 + + +def test_queue_callback_thread(): + queue_ = queue.Queue() + callback = mock.Mock(spec=()) + qct = _helper_threads.QueueCallbackThread(queue_, callback) + + # Set up an appropriate mock for the queue, and call the queue callback + # thread. + with mock.patch.object(queue.Queue, 'get') as get: + get.side_effect = (mock.sentinel.A, _helper_threads.STOP) + qct() + + # Assert that we got the expected calls. + assert get.call_count == 2 + callback.assert_called_once_with(mock.sentinel.A) + + +def test_queue_callback_thread_exception(): + queue_ = queue.Queue() + callback = mock.Mock(spec=(), side_effect=(Exception,)) + qct = _helper_threads.QueueCallbackThread(queue_, callback) + + # Set up an appropriate mock for the queue, and call the queue callback + # thread. + with mock.patch.object(queue.Queue, 'get') as get: + get.side_effect = (mock.sentinel.A, _helper_threads.STOP) + qct() + + # Assert that we got the expected calls. + assert get.call_count == 2 + callback.assert_called_once_with(mock.sentinel.A) diff --git a/pubsub/tests/unit/pubsub_v1/subscriber/test_histogram.py b/pubsub/tests/unit/pubsub_v1/subscriber/test_histogram.py new file mode 100644 index 000000000000..23474a19d116 --- /dev/null +++ b/pubsub/tests/unit/pubsub_v1/subscriber/test_histogram.py @@ -0,0 +1,84 @@ +# Copyright 2017, Google Inc. 
All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from google.cloud.pubsub_v1.subscriber import _histogram + + +def test_init(): + data = {} + histo = _histogram.Histogram(data=data) + assert histo._data is data + assert len(histo) == 0 + + +def test_contains(): + histo = _histogram.Histogram() + histo.add(10) + histo.add(20) + assert 10 in histo + assert 20 in histo + assert 30 not in histo + + +def test_max(): + histo = _histogram.Histogram() + assert histo.max == 600 + histo.add(120) + assert histo.max == 120 + histo.add(150) + assert histo.max == 150 + histo.add(20) + assert histo.max == 150 + + +def test_min(): + histo = _histogram.Histogram() + assert histo.min == 10 + histo.add(60) + assert histo.min == 60 + histo.add(30) + assert histo.min == 30 + histo.add(120) + assert histo.min == 30 + + +def test_add(): + histo = _histogram.Histogram() + histo.add(60) + assert histo._data[60] == 1 + histo.add(60) + assert histo._data[60] == 2 + + +def test_add_lower_limit(): + histo = _histogram.Histogram() + histo.add(5) + assert 5 not in histo + assert 10 in histo + + +def test_add_upper_limit(): + histo = _histogram.Histogram() + histo.add(12000) + assert 12000 not in histo + assert 600 in histo + + +def test_percentile(): + histo = _histogram.Histogram() + [histo.add(i) for i in range(101, 201)] + assert histo.percentile(100) == 200 + assert histo.percentile(101) == 200 + assert histo.percentile(99) == 199 + assert histo.percentile(1) == 101 diff 
--git a/pubsub/tests/unit/pubsub_v1/subscriber/test_message.py b/pubsub/tests/unit/pubsub_v1/subscriber/test_message.py new file mode 100644 index 000000000000..a3a1e16f027e --- /dev/null +++ b/pubsub/tests/unit/pubsub_v1/subscriber/test_message.py @@ -0,0 +1,102 @@ +# Copyright 2017, Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import queue +import time + +import mock + +from google.cloud.pubsub_v1 import types +from google.cloud.pubsub_v1.subscriber import message + + +def create_message(data, ack_id='ACKID', **attrs): + with mock.patch.object(message.Message, 'lease') as lease: + with mock.patch.object(time, 'time') as time_: + time_.return_value = 1335020400 + msg = message.Message(types.PubsubMessage( + attributes=attrs, + data=data, + message_id='message_id', + publish_time=types.Timestamp(seconds=1335020400 - 86400), + ), ack_id, queue.Queue()) + lease.assert_called_once_with() + return msg + + +def test_attributes(): + msg = create_message(b'foo', baz='bacon', spam='eggs') + assert msg.attributes == {'baz': 'bacon', 'spam': 'eggs'} + + +def test_data(): + msg = create_message(b'foo') + assert msg.data == b'foo' + + +def test_publish_time(): + msg = create_message(b'foo') + assert msg.publish_time == types.Timestamp(seconds=1335020400 - 86400) + + +def test_ack(): + msg = create_message(b'foo', ack_id='bogus_ack_id') + with mock.patch.object(msg._request_queue, 'put') as put: + with mock.patch.object(message.Message, 
'drop') as drop: + msg.ack() + put.assert_called_once_with(('ack', { + 'ack_id': 'bogus_ack_id', + 'byte_size': 25, + 'time_to_ack': mock.ANY, + })) + + +def test_drop(): + msg = create_message(b'foo', ack_id='bogus_ack_id') + with mock.patch.object(msg._request_queue, 'put') as put: + msg.drop() + put.assert_called_once_with(('drop', { + 'ack_id': 'bogus_ack_id', + 'byte_size': 25, + })) + + +def test_lease(): + msg = create_message(b'foo', ack_id='bogus_ack_id') + with mock.patch.object(msg._request_queue, 'put') as put: + msg.lease() + put.assert_called_once_with(('lease', { + 'ack_id': 'bogus_ack_id', + 'byte_size': 25, + })) + + +def test_modify_ack_deadline(): + msg = create_message(b'foo', ack_id='bogus_id') + with mock.patch.object(msg._request_queue, 'put') as put: + msg.modify_ack_deadline(60) + put.assert_called_once_with(('modify_ack_deadline', { + 'ack_id': 'bogus_id', + 'seconds': 60, + })) + + +def test_nack(): + msg = create_message(b'foo', ack_id='bogus_id') + with mock.patch.object(msg._request_queue, 'put') as put: + msg.nack() + put.assert_called_once_with(('nack', { + 'ack_id': 'bogus_id', + 'byte_size': 25, + })) diff --git a/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_base.py b/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_base.py new file mode 100644 index 000000000000..df963424ccb9 --- /dev/null +++ b/pubsub/tests/unit/pubsub_v1/subscriber/test_policy_base.py @@ -0,0 +1,231 @@ +# Copyright 2017, Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import time + +import mock + +from google.auth import credentials +from google.cloud.pubsub_v1 import subscriber +from google.cloud.pubsub_v1 import types +from google.cloud.pubsub_v1.subscriber.policy import thread + + +def create_policy(flow_control=types.FlowControl()): + creds = mock.Mock(spec=credentials.Credentials) + client = subscriber.Client(credentials=creds) + return thread.Policy(client, 'sub_name_d', flow_control=flow_control) + + +def test_ack_deadline(): + policy = create_policy() + assert policy.ack_deadline == 10 + policy.histogram.add(20) + assert policy.ack_deadline == 20 + policy.histogram.add(10) + assert policy.ack_deadline == 20 + + +def test_get_initial_request(): + policy = create_policy() + initial_request = policy.get_initial_request() + assert isinstance(initial_request, types.StreamingPullRequest) + assert initial_request.subscription == 'sub_name_d' + assert initial_request.stream_ack_deadline_seconds == 10 + + +def test_managed_ack_ids(): + policy = create_policy() + + # Ensure we always get a set back, even if the property is not yet set. + managed_ack_ids = policy.managed_ack_ids + assert isinstance(managed_ack_ids, set) + + # Ensure that multiple calls give the same actual object back. 
+ assert managed_ack_ids is policy.managed_ack_ids + + +def test_subscription(): + policy = create_policy() + assert policy.subscription == 'sub_name_d' + + +def test_ack(): + policy = create_policy() + policy._consumer.active = True + with mock.patch.object(policy._consumer, 'send_request') as send_request: + policy.ack('ack_id_string', 20) + send_request.assert_called_once_with(types.StreamingPullRequest( + ack_ids=['ack_id_string'], + )) + assert len(policy.histogram) == 1 + assert 20 in policy.histogram + + +def test_ack_no_time(): + policy = create_policy() + policy._consumer.active = True + with mock.patch.object(policy._consumer, 'send_request') as send_request: + policy.ack('ack_id_string') + send_request.assert_called_once_with(types.StreamingPullRequest( + ack_ids=['ack_id_string'], + )) + assert len(policy.histogram) == 0 + + +def test_ack_paused(): + policy = create_policy() + policy._paused = True + policy._consumer.active = False + with mock.patch.object(policy, 'open') as open_: + policy.ack('ack_id_string') + open_.assert_called() + assert 'ack_id_string' in policy._ack_on_resume + + +def test_call_rpc(): + policy = create_policy() + with mock.patch.object(policy._client.api, 'streaming_pull') as pull: + policy.call_rpc(mock.sentinel.GENERATOR) + pull.assert_called_once_with(mock.sentinel.GENERATOR) + + +def test_drop(): + policy = create_policy() + policy.managed_ack_ids.add('ack_id_string') + policy._bytes = 20 + policy.drop('ack_id_string', 20) + assert len(policy.managed_ack_ids) == 0 + assert policy._bytes == 0 + + # Do this again to establish idempotency. + policy.drop('ack_id_string', 20) + assert len(policy.managed_ack_ids) == 0 + assert policy._bytes == 0 + + +def test_drop_below_threshold(): + """Establish that we resume a paused subscription. + + If the subscription is paused, and we drop sufficiently below + the flow control thresholds, it should resume. 
+ """ + policy = create_policy() + policy.managed_ack_ids.add('ack_id_string') + policy._bytes = 20 + policy._paused = True + with mock.patch.object(policy, 'open') as open_: + policy.drop(ack_id='ack_id_string', byte_size=20) + open_.assert_called_once_with(policy._callback) + assert policy._paused is False + + +def test_load(): + flow_control = types.FlowControl(max_messages=10, max_bytes=1000) + policy = create_policy(flow_control=flow_control) + + # This should mean that our messages count is at 10%, and our bytes + # are at 15%; the ._load property should return the higher (0.15). + policy.lease(ack_id='one', byte_size=150) + assert policy._load == 0.15 + + # After this message is added, the messages should be higher at 20% + # (versus 16% for bytes). + policy.lease(ack_id='two', byte_size=10) + assert policy._load == 0.2 + + # Returning a number above 100% is fine. + policy.lease(ack_id='three', byte_size=1000) + assert policy._load == 1.16 + + +def test_modify_ack_deadline(): + policy = create_policy() + with mock.patch.object(policy._consumer, 'send_request') as send_request: + policy.modify_ack_deadline('ack_id_string', 60) + send_request.assert_called_once_with(types.StreamingPullRequest( + modify_deadline_ack_ids=['ack_id_string'], + modify_deadline_seconds=[60], + )) + + +def test_maintain_leases_inactive_consumer(): + policy = create_policy() + policy._consumer.active = False + assert policy.maintain_leases() is None + + +def test_maintain_leases_ack_ids(): + policy = create_policy() + policy._consumer.active = True + policy.lease('my ack id', 50) + + # Mock the sleep object. + with mock.patch.object(time, 'sleep', autospec=True) as sleep: + def trigger_inactive(seconds): + assert 0 < seconds < 10 + policy._consumer.active = False + sleep.side_effect = trigger_inactive + + # Also mock the consumer, which sends the request. 
def test_maintain_leases_no_ack_ids():
    policy = create_policy()
    policy._consumer.active = True

    def deactivate(seconds):
        # The sleep interval is jittered but stays below the ack deadline;
        # flipping the consumer inactive ends the maintenance loop.
        assert 0 < seconds < 10
        policy._consumer.active = False

    with mock.patch.object(time, 'sleep', autospec=True) as sleep:
        sleep.side_effect = deactivate
        policy.maintain_leases()
    sleep.assert_called()


def test_lease():
    policy = create_policy()
    # Leasing the same ack id twice must only be counted once.
    for _ in range(2):
        policy.lease(ack_id='ack_id_string', byte_size=20)
        assert len(policy.managed_ack_ids) == 1
        assert policy._bytes == 20


def test_lease_above_threshold():
    # Crossing the max_messages flow-control threshold pauses the stream.
    policy = create_policy(flow_control=types.FlowControl(max_messages=2))
    with mock.patch.object(policy, 'close') as close:
        policy.lease(ack_id='first_ack_id', byte_size=20)
        assert close.call_count == 0
        policy.lease(ack_id='second_ack_id', byte_size=25)
        close.assert_called_once_with()


def test_nack():
    policy = create_policy()
    with mock.patch.object(policy, 'modify_ack_deadline') as mad:
        with mock.patch.object(policy, 'drop') as drop:
            policy.nack(ack_id='ack_id_string', byte_size=10)
    # A nack is a drop plus an immediate deadline-to-zero modification.
    drop.assert_called_once_with(ack_id='ack_id_string', byte_size=10)
    mad.assert_called_once_with(ack_id='ack_id_string', seconds=0)
All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import + +from concurrent import futures +import queue +import threading + +import grpc + +import mock + +import pytest + +from google.auth import credentials +from google.cloud.pubsub_v1 import subscriber +from google.cloud.pubsub_v1 import types +from google.cloud.pubsub_v1.subscriber import _helper_threads +from google.cloud.pubsub_v1.subscriber import message +from google.cloud.pubsub_v1.subscriber.policy import thread + + +def create_policy(**kwargs): + creds = mock.Mock(spec=credentials.Credentials) + client = subscriber.Client(credentials=creds) + return thread.Policy(client, 'sub_name_c', **kwargs) + + +def test_init(): + policy = create_policy() + policy._callback(None) + + +def test_init_with_executor(): + executor = futures.ThreadPoolExecutor(max_workers=25) + policy = create_policy(executor=executor, queue=queue.Queue()) + assert policy._executor is executor + + +def test_close(): + policy = create_policy() + consumer = policy._consumer + with mock.patch.object(consumer, 'stop_consuming') as stop_consuming: + policy.close() + stop_consuming.assert_called_once_with() + assert 'callback request worker' not in policy._consumer.helper_threads + + +@mock.patch.object(_helper_threads.HelperThreadRegistry, 'start') +@mock.patch.object(threading.Thread, 'start') +def test_open(thread_start, htr_start): + policy = create_policy() + with 
def test_on_callback_request():
    policy = create_policy()
    with mock.patch.object(policy, 'call_rpc') as call_rpc:
        # Queued requests arrive as (method_name, kwargs) tuples and are
        # dispatched to the named method.
        policy.on_callback_request(('call_rpc', {'something': 42}))
    call_rpc.assert_called_once_with(something=42)


def test_on_exception_deadline_exceeded():
    # DEADLINE_EXCEEDED is an expected streaming condition and is absorbed.
    exc = mock.Mock(spec=('code',))
    exc.code.return_value = grpc.StatusCode.DEADLINE_EXCEEDED
    assert create_policy().on_exception(exc) is None


def test_on_exception_other():
    # Any unrecognized exception propagates to the caller.
    with pytest.raises(TypeError):
        create_policy().on_exception(TypeError('wahhhhhh'))


def test_on_response():
    handler = mock.Mock(spec=())
    policy = create_policy()
    policy._callback = handler

    # A streaming-pull response carrying two received messages...
    response = types.StreamingPullResponse(
        received_messages=[
            {'ack_id': 'fack',
             'message': types.PubsubMessage(data=b'foo', message_id='1')},
            {'ack_id': 'back',
             'message': types.PubsubMessage(data=b'bar', message_id='2')},
        ],
    )

    # ...must invoke the callback once per message, each time with a
    # wrapped Message instance as the sole positional argument.
    policy.on_response(response)
    assert handler.call_count == 2
    for call in handler.mock_calls:
        assert isinstance(call[1][0], message.Message)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import mock

from google.auth import credentials
from google.cloud.pubsub_v1 import subscriber
from google.cloud.pubsub_v1.subscriber.policy import thread


def create_client():
    """Return a subscriber.Client backed by mock credentials."""
    return subscriber.Client(
        credentials=mock.Mock(spec=credentials.Credentials))


def test_init():
    # The thread-based policy is the client's default policy class.
    assert create_client()._policy_class is thread.Policy


def test_subscribe():
    # Subscribing without a callback returns an (unopened) policy.
    subscription = create_client().subscribe('sub_name_a')
    assert isinstance(subscription, thread.Policy)


def test_subscribe_with_callback():
    client = create_client()
    callback = mock.Mock()
    with mock.patch.object(thread.Policy, 'open') as open_:
        subscription = client.subscribe('sub_name_b', callback)
    # Supplying a callback opens the subscription immediately with it.
    open_.assert_called_once_with(callback)
    assert isinstance(subscription, thread.Policy)
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from google.cloud import pubsub
from google.cloud import pubsub_v1


def test_exported_things():
    """The ``pubsub`` alias re-exports the ``pubsub_v1`` public surface."""
    # Identity (``is``), not mere equality: the alias must expose the very
    # same objects, not copies.
    for name in ('PublisherClient', 'SubscriberClient', 'types'):
        assert getattr(pubsub, name) is getattr(pubsub_v1, name)