diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 41a2727dfa41..c7ff6f8e2240 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -283,32 +283,6 @@ should either be: ``EXTRA_TOX_ENVS``. This value is unencrypted in ``gcloud-python-wheels`` to make ongoing maintenance easier. -Shared Code with External Projects ----------------------------------- - -In order to enable high-quality HTTP transfer of large data (for Cloud -Storage), we have temporarily included some code from the -`apitools `__ library. - -We have chosen to partially include it, rather than include it as -a dependency because - -- The library is not yet included on PyPI. -- The library's ``protorpc`` dependency is not Python 3 friendly, so - would block us from Python 3 support if fully included. - -The included code in lives in the -`_gcloud_vendor `__ -directory. It is a snapshot of the ``e5a5c36e24926310712d20b93b4cdd02424a81f5`` -commit from the main project imported in -``4c27079cf6d7f9814b36cfd16f3402455f768094``. In addition to the raw import, -we have customized (e.g. rewriting imports) for our library: - -- ``334961054d875641d150eec4d6938f6f824ea655`` -- ``565750ee7d19742b520dd62e2a4ff38325987284`` -- ``67b06019549a4db8168ff4c5171c9d701ac94a15`` -- ``f4a53ee64fad5f3d7f29a0341e6a72a060edfcc2`` - Supported Python Versions ------------------------- diff --git a/_gcloud_vendor/__init__.py b/_gcloud_vendor/__init__.py deleted file mode 100644 index 9ee34b0c867b..000000000000 --- a/_gcloud_vendor/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -"""Dependencies "vendored in", due to dependencies, Python versions, etc. - -Current set ------------ - -``apitools`` (pending release to PyPI, plus acceptable Python version - support for its dependencies). Review before M2. -""" diff --git a/_gcloud_vendor/apitools/__init__.py b/_gcloud_vendor/apitools/__init__.py deleted file mode 100644 index 9870b5e53b94..000000000000 --- a/_gcloud_vendor/apitools/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Package stub.""" diff --git a/_gcloud_vendor/apitools/base/__init__.py b/_gcloud_vendor/apitools/base/__init__.py deleted file mode 100644 index 9870b5e53b94..000000000000 --- a/_gcloud_vendor/apitools/base/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Package stub.""" diff --git a/_gcloud_vendor/apitools/base/py/__init__.py b/_gcloud_vendor/apitools/base/py/__init__.py deleted file mode 100644 index 9870b5e53b94..000000000000 --- a/_gcloud_vendor/apitools/base/py/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Package stub.""" diff --git a/_gcloud_vendor/apitools/base/py/exceptions.py b/_gcloud_vendor/apitools/base/py/exceptions.py deleted file mode 100644 index 55faa4970ebb..000000000000 --- a/_gcloud_vendor/apitools/base/py/exceptions.py +++ /dev/null @@ -1,100 +0,0 @@ -#!/usr/bin/env python -"""Exceptions for generated client libraries.""" - - -class Error(Exception): - """Base class for all exceptions.""" - - -class TypecheckError(Error, TypeError): - """An object of an incorrect type is provided.""" - - -class NotFoundError(Error): - """A specified resource could not be found.""" - - -class UserError(Error): - """Base class for errors related to user input.""" - - -class InvalidDataError(Error): - """Base class for any invalid data error.""" - - -class CommunicationError(Error): - """Any communication error talking to an API server.""" - - -class HttpError(CommunicationError): - """Error making a request. 
Soon to be HttpError.""" - - def __init__(self, response, content, url): - super(HttpError, self).__init__() - self.response = response - self.content = content - self.url = url - - def __str__(self): - content = self.content.decode('ascii', 'replace') - return 'HttpError accessing <%s>: response: <%s>, content <%s>' % ( - self.url, self.response, content) - - @property - def status_code(self): - # TODO(craigcitro): Turn this into something better than a - # KeyError if there is no status. - return int(self.response['status']) - - @classmethod - def FromResponse(cls, http_response): - return cls(http_response.info, http_response.content, - http_response.request_url) - - -class InvalidUserInputError(InvalidDataError): - """User-provided input is invalid.""" - - -class InvalidDataFromServerError(InvalidDataError, CommunicationError): - """Data received from the server is malformed.""" - - -class BatchError(Error): - """Error generated while constructing a batch request.""" - - -class ConfigurationError(Error): - """Base class for configuration errors.""" - - -class GeneratedClientError(Error): - """The generated client configuration is invalid.""" - - -class ConfigurationValueError(UserError): - """Some part of the user-specified client configuration is invalid.""" - - -class ResourceUnavailableError(Error): - """User requested an unavailable resource.""" - - -class CredentialsError(Error): - """Errors related to invalid credentials.""" - - -class TransferError(CommunicationError): - """Errors related to transfers.""" - - -class TransferInvalidError(TransferError): - """The given transfer is invalid.""" - - -class NotYetImplementedError(GeneratedClientError): - """This functionality is not yet implemented.""" - - -class StreamExhausted(Error): - """Attempted to read more bytes from a stream than were available.""" diff --git a/_gcloud_vendor/apitools/base/py/http_wrapper.py b/_gcloud_vendor/apitools/base/py/http_wrapper.py deleted file mode 100644 index 8b8b6cfc08aa..000000000000 --- a/_gcloud_vendor/apitools/base/py/http_wrapper.py +++ /dev/null @@ -1,183 +0,0 @@ -#!/usr/bin/env python -"""HTTP wrapper for apitools. - -This library wraps the underlying http library we use, which is -currently httplib2. -""" - -import collections -import logging -import socket -import time - -import httplib2 -from six.moves import http_client -from six.moves import range -from six.moves.urllib.parse import urlsplit - -from _gcloud_vendor.apitools.base.py import exceptions -from _gcloud_vendor.apitools.base.py import util - -__all__ = [ - 'GetHttp', - 'MakeRequest', - 'Request', -] - - -# 308 and 429 don't have names in httplib. -RESUME_INCOMPLETE = 308 -TOO_MANY_REQUESTS = 429 -_REDIRECT_STATUS_CODES = ( - http_client.MOVED_PERMANENTLY, - http_client.FOUND, - http_client.SEE_OTHER, - http_client.TEMPORARY_REDIRECT, - RESUME_INCOMPLETE, -) - - -class Request(object): - """Class encapsulating the data for an HTTP request.""" - - def __init__(self, url='', http_method='GET', headers=None, body=''): - self.url = url - self.http_method = http_method - self.headers = headers or {} - self.__body = None - self.body = body - - @property - def body(self): - return self.__body - - @body.setter - def body(self, value): - self.__body = value - if value is not None: - self.headers['content-length'] = str(len(self.__body)) - else: - self.headers.pop('content-length', None) - - -# Note: currently the order of fields here is important, since we want -# to be able to pass in the result from httplib2.request. 
-class Response(collections.namedtuple( - 'HttpResponse', ['info', 'content', 'request_url'])): - """Class encapsulating data for an HTTP response.""" - __slots__ = () - - def __len__(self): - def ProcessContentRange(content_range): - _, _, range_spec = content_range.partition(' ') - byte_range, _, _ = range_spec.partition('/') - start, _, end = byte_range.partition('-') - return int(end) - int(start) + 1 - - if '-content-encoding' in self.info and 'content-range' in self.info: - # httplib2 rewrites content-length in the case of a compressed - # transfer; we can't trust the content-length header in that - # case, but we *can* trust content-range, if it's present. - return ProcessContentRange(self.info['content-range']) - elif 'content-length' in self.info: - return int(self.info.get('content-length')) - elif 'content-range' in self.info: - return ProcessContentRange(self.info['content-range']) - return len(self.content) - - @property - def status_code(self): - return int(self.info['status']) - - @property - def retry_after(self): - if 'retry-after' in self.info: - return int(self.info['retry-after']) - - @property - def is_redirect(self): - return (self.status_code in _REDIRECT_STATUS_CODES and - 'location' in self.info) - - -def MakeRequest(http, http_request, retries=5, redirections=5): - """Send http_request via the given http. - - This wrapper exists to handle translation between the plain httplib2 - request/response types and the Request and Response types above. - This will also be the hook for error/retry handling. - - Args: - http: An httplib2.Http instance, or a http multiplexer that delegates to - an underlying http, for example, HTTPMultiplexer. - http_request: A Request to send. - retries: (int, default 5) Number of retries to attempt on 5XX replies. - redirections: (int, default 5) Number of redirects to follow. - - Returns: - A Response object. - - Raises: - InvalidDataFromServerError: if there is no response after retries. - """ - response = None - exc = None - connection_type = None - # Handle overrides for connection types. This is used if the caller - # wants control over the underlying connection for managing callbacks - # or hash digestion. - if getattr(http, 'connections', None): - url_scheme = urlsplit(http_request.url).scheme - if url_scheme and url_scheme in http.connections: - connection_type = http.connections[url_scheme] - for retry in range(retries + 1): - # Note that the str() calls here are important for working around - # some funny business with message construction and unicode in - # httplib itself. 
See, eg, - # http://bugs.python.org/issue11898 - info = None - try: - info, content = http.request( - str(http_request.url), method=str(http_request.http_method), - body=http_request.body, headers=http_request.headers, - redirections=redirections, connection_type=connection_type) - except http_client.BadStatusLine as e: - logging.error('Caught BadStatusLine from httplib, retrying: %s', e) - exc = e - except socket.error as e: - if http_request.http_method != 'GET': - raise - logging.error('Caught socket error, retrying: %s', e) - exc = e - except http_client.IncompleteRead as e: - if http_request.http_method != 'GET': - raise - logging.error('Caught IncompleteRead error, retrying: %s', e) - exc = e - if info is not None: - response = Response(info, content, http_request.url) - if (response.status_code < 500 and - response.status_code != TOO_MANY_REQUESTS and - not response.retry_after): - break - logging.info('Retrying request to url <%s> after status code %s.', - response.request_url, response.status_code) - elif isinstance(exc, http_client.IncompleteRead): - logging.info('Retrying request to url <%s> after incomplete read.', - str(http_request.url)) - else: - logging.info('Retrying request to url <%s> after connection break.', - str(http_request.url)) - # TODO(craigcitro): Make this timeout configurable. - if response: - time.sleep(response.retry_after or util.CalculateWaitForRetry(retry)) - else: - time.sleep(util.CalculateWaitForRetry(retry)) - if response is None: - raise exceptions.InvalidDataFromServerError( - 'HTTP error on final retry: %s' % exc) - return response - - -def GetHttp(): - return httplib2.Http() diff --git a/_gcloud_vendor/apitools/base/py/transfer.py b/_gcloud_vendor/apitools/base/py/transfer.py deleted file mode 100644 index c98d5798b5eb..000000000000 --- a/_gcloud_vendor/apitools/base/py/transfer.py +++ /dev/null @@ -1,717 +0,0 @@ -#!/usr/bin/env python -"""Upload and download support for apitools.""" -from __future__ import print_function - -import email.generator as email_generator -import email.mime.multipart as mime_multipart -import email.mime.nonmultipart as mime_nonmultipart -import io -import json -import mimetypes -import os -import threading - -from six.moves import http_client - -from _gcloud_vendor.apitools.base.py import exceptions -from _gcloud_vendor.apitools.base.py import http_wrapper -from _gcloud_vendor.apitools.base.py import util - -__all__ = [ - 'Download', - 'Upload', -] - -_RESUMABLE_UPLOAD_THRESHOLD = 5 << 20 -_SIMPLE_UPLOAD = 'simple' -_RESUMABLE_UPLOAD = 'resumable' - - -class _Transfer(object): - """Generic bits common to Uploads and Downloads.""" - - def __init__(self, stream, close_stream=False, chunksize=None, - auto_transfer=True, http=None): - self.__bytes_http = None - self.__close_stream = close_stream - self.__http = http - self.__stream = stream - self.__url = None - - self.auto_transfer = auto_transfer - self.chunksize = chunksize or 1048576 - - def __repr__(self): - return str(self) - - @property - def close_stream(self): - return self.__close_stream - - @property - def http(self): - return self.__http - - @property - def bytes_http(self): - return self.__bytes_http or self.http - - @bytes_http.setter - def bytes_http(self, value): - self.__bytes_http = value - - @property - def stream(self): - return self.__stream - - @property - def url(self): - return self.__url - - def _Initialize(self, http, url): - """Initialize this download by setting self.http and self.url. 
- - We want the user to be able to override self.http by having set - the value in the constructor; in that case, we ignore the provided - http. - - Args: - http: An httplib2.Http instance or None. - url: The url for this transfer. - - Returns: - None. Initializes self. - """ - self.EnsureUninitialized() - if self.http is None: - self.__http = http or http_wrapper.GetHttp() - self.__url = url - - @property - def initialized(self): - return self.url is not None and self.http is not None - - @property - def _type_name(self): - return type(self).__name__ - - def EnsureInitialized(self): - if not self.initialized: - raise exceptions.TransferInvalidError( - 'Cannot use uninitialized %s', self._type_name) - - def EnsureUninitialized(self): - if self.initialized: - raise exceptions.TransferInvalidError( - 'Cannot re-initialize %s', self._type_name) - - def __del__(self): - if self.__close_stream: - self.__stream.close() - - def _ExecuteCallback(self, callback, response): - # TODO(craigcitro): Push these into a queue. - if callback is not None: - threading.Thread(target=callback, args=(response, self)).start() - - -class Download(_Transfer): - """Data for a single download. - - Public attributes: - chunksize: default chunksize to use for transfers. - """ - _ACCEPTABLE_STATUSES = set(( - http_client.OK, - http_client.NO_CONTENT, - http_client.PARTIAL_CONTENT, - http_client.REQUESTED_RANGE_NOT_SATISFIABLE, - )) - _REQUIRED_SERIALIZATION_KEYS = set(( - 'auto_transfer', 'progress', 'total_size', 'url')) - - def __init__(self, *args, **kwds): - super(Download, self).__init__(*args, **kwds) - self.__initial_response = None - self.__progress = 0 - self.__total_size = None - - @property - def progress(self): - return self.__progress - - @classmethod - def FromFile(cls, filename, overwrite=False, auto_transfer=True): - """Create a new download object from a filename.""" - path = os.path.expanduser(filename) - if os.path.exists(path) and not overwrite: - raise exceptions.InvalidUserInputError( - 'File %s exists and overwrite not specified' % path) - return cls(open(path, 'wb'), close_stream=True, auto_transfer=auto_transfer) - - @classmethod - def FromStream(cls, stream, auto_transfer=True): - """Create a new Download object from a stream.""" - return cls(stream, auto_transfer=auto_transfer) - - @classmethod - def FromData(cls, stream, json_data, http=None, auto_transfer=None): - """Create a new Download object from a stream and serialized data.""" - info = json.loads(json_data) - missing_keys = cls._REQUIRED_SERIALIZATION_KEYS - set(info.keys()) - if missing_keys: - raise exceptions.InvalidDataError( - 'Invalid serialization data, missing keys: %s' % ( - ', '.join(missing_keys))) - download = cls.FromStream(stream) - if auto_transfer is not None: - download.auto_transfer = auto_transfer - else: - download.auto_transfer = info['auto_transfer'] - setattr(download, '_Download__progress', info['progress']) - setattr(download, '_Download__total_size', info['total_size']) - download._Initialize(http, info['url']) # pylint: disable=protected-access - return download - - @property - def serialization_data(self): - self.EnsureInitialized() - return { - 'auto_transfer': self.auto_transfer, - 'progress': self.progress, - 'total_size': self.total_size, - 'url': self.url, - } - - @property - def total_size(self): - return self.__total_size - - def __str__(self): - if not self.initialized: - return 'Download (uninitialized)' - else: - return 'Download with %d/%s bytes transferred from url %s' % ( - self.progress, 
self.total_size, self.url) - - def ConfigureRequest(self, http_request, url_builder): - url_builder.query_params['alt'] = 'media' - http_request.headers['Range'] = 'bytes=0-%d' % (self.chunksize - 1,) - - def __SetTotal(self, info): - if 'content-range' in info: - _, _, total = info['content-range'].rpartition('/') - if total != '*': - self.__total_size = int(total) - # Note "total_size is None" means we don't know it; if no size - # info was returned on our initial range request, that means we - # have a 0-byte file. (That last statement has been verified - # empirically, but is not clearly documented anywhere.) - if self.total_size is None: - self.__total_size = 0 - - def InitializeDownload(self, http_request, http=None, client=None): - """Initialize this download by making a request. - - Args: - http_request: The HttpRequest to use to initialize this download. - http: The httplib2.Http instance for this request. - client: If provided, let this client process the final URL before - sending any additional requests. If client is provided and - http is not, client.http will be used instead. - """ - self.EnsureUninitialized() - if http is None and client is None: - raise exceptions.UserError('Must provide client or http.') - http = http or client.http - if client is not None: - http_request.url = client.FinalizeTransferUrl(http_request.url) - response = http_wrapper.MakeRequest(self.bytes_http or http, http_request) - if response.status_code not in self._ACCEPTABLE_STATUSES: - raise exceptions.HttpError.FromResponse(response) - self.__initial_response = response - self.__SetTotal(response.info) - url = response.info.get('content-location', response.request_url) - if client is not None: - url = client.FinalizeTransferUrl(url) - self._Initialize(http, url) - # Unless the user has requested otherwise, we want to just - # go ahead and pump the bytes now. 
- if self.auto_transfer: - self.StreamInChunks() - - @staticmethod - def _ArgPrinter(response, unused_download): - if 'content-range' in response.info: - print('Received %s' % response.info['content-range']) - else: - print('Received %d bytes' % len(response)) - - @staticmethod - def _CompletePrinter(*unused_args): - print('Download complete') - - def __NormalizeStartEnd(self, start, end=None): - if end is not None: - if start < 0: - raise exceptions.TransferInvalidError( - 'Cannot have end index with negative start index') - elif start >= self.total_size: - raise exceptions.TransferInvalidError( - 'Cannot have start index greater than total size') - end = min(end, self.total_size - 1) - if end < start: - raise exceptions.TransferInvalidError( - 'Range requested with end[%s] < start[%s]' % (end, start)) - return start, end - else: - if start < 0: - start = max(0, start + self.total_size) - return start, self.total_size - - def __SetRangeHeader(self, request, start, end=None): - if start < 0: - request.headers['range'] = 'bytes=%d' % start - elif end is None: - request.headers['range'] = 'bytes=%d-' % start - else: - request.headers['range'] = 'bytes=%d-%d' % (start, end) - - def __GetChunk(self, start, end=None, additional_headers=None): - """Retrieve a chunk, and return the full response.""" - self.EnsureInitialized() - end_byte = min(end or start + self.chunksize, self.total_size) - request = http_wrapper.Request(url=self.url) - self.__SetRangeHeader(request, start, end=end_byte) - if additional_headers is not None: - request.headers.update(additional_headers) - return http_wrapper.MakeRequest(self.bytes_http, request) - - def __ProcessResponse(self, response): - """Process this response (by updating self and writing to self.stream).""" - if response.status_code not in self._ACCEPTABLE_STATUSES: - raise exceptions.TransferInvalidError(response.content) - if response.status_code in (http_client.OK, http_client.PARTIAL_CONTENT): - self.stream.write(response.content) - self.__progress += len(response) - elif response.status_code == http_client.NO_CONTENT: - # It's important to write something to the stream for the case - # of a 0-byte download to a file, as otherwise python won't - # create the file. - self.stream.write('') - return response - - def GetRange(self, start, end=None, additional_headers=None): - """Retrieve a given byte range from this download, inclusive. - - Range must be of one of these three forms: - * 0 <= start, end = None: Fetch from start to the end of the file. - * 0 <= start <= end: Fetch the bytes from start to end. - * start < 0, end = None: Fetch the last -start bytes of the file. - - (These variations correspond to those described in the HTTP 1.1 - protocol for range headers in RFC 2616, sec. 14.35.1.) - - Args: - start: (int) Where to start fetching bytes. (See above.) - end: (int, optional) Where to stop fetching bytes. (See above.) - additional_headers: (bool, optional) Any additional headers to - pass with the request. - - Returns: - None. Streams bytes into self.stream. 
- """ - self.EnsureInitialized() - progress, end = self.__NormalizeStartEnd(start, end) - while progress < end: - chunk_end = min(progress + self.chunksize, end) - response = self.__GetChunk(progress, end=chunk_end, - additional_headers=additional_headers) - response = self.__ProcessResponse(response) - progress += len(response) - if not response: - raise exceptions.TransferInvalidError( - 'Zero bytes unexpectedly returned in download response') - - def StreamInChunks(self, callback=None, finish_callback=None, - additional_headers=None): - """Stream the entire download.""" - callback = callback or self._ArgPrinter - finish_callback = finish_callback or self._CompletePrinter - - self.EnsureInitialized() - while True: - if self.__initial_response is not None: - response = self.__initial_response - self.__initial_response = None - else: - response = self.__GetChunk(self.progress, - additional_headers=additional_headers) - response = self.__ProcessResponse(response) - self._ExecuteCallback(callback, response) - if (response.status_code == http_client.OK or - self.progress >= self.total_size): - break - self._ExecuteCallback(finish_callback, response) - - -class Upload(_Transfer): - """Data for a single Upload. - - Fields: - stream: The stream to upload. - mime_type: MIME type of the upload. - total_size: (optional) Total upload size for the stream. - close_stream: (default: False) Whether or not we should close the - stream when finished with the upload. - auto_transfer: (default: True) If True, stream all bytes as soon as - the upload is created. - """ - _REQUIRED_SERIALIZATION_KEYS = set(( - 'auto_transfer', 'mime_type', 'total_size', 'url')) - - def __init__(self, stream, mime_type, total_size=None, http=None, - close_stream=False, chunksize=None, auto_transfer=True): - super(Upload, self).__init__( - stream, close_stream=close_stream, chunksize=chunksize, - auto_transfer=auto_transfer, http=http) - self.__complete = False - self.__mime_type = mime_type - self.__progress = 0 - self.__server_chunk_granularity = None - self.__strategy = None - - self.total_size = total_size - - @property - def progress(self): - return self.__progress - - @classmethod - def FromFile(cls, filename, mime_type=None, auto_transfer=True): - """Create a new Upload object from a filename.""" - path = os.path.expanduser(filename) - if not os.path.exists(path): - raise exceptions.NotFoundError('Could not find file %s' % path) - if not mime_type: - mime_type, _ = mimetypes.guess_type(path) - if mime_type is None: - raise exceptions.InvalidUserInputError( - 'Could not guess mime type for %s' % path) - size = os.stat(path).st_size - return cls(open(path, 'rb'), mime_type, total_size=size, close_stream=True, - auto_transfer=auto_transfer) - - @classmethod - def FromStream(cls, stream, mime_type, total_size=None, auto_transfer=True): - """Create a new Upload object from a stream.""" - if mime_type is None: - raise exceptions.InvalidUserInputError( - 'No mime_type specified for stream') - return cls(stream, mime_type, total_size=total_size, close_stream=False, - auto_transfer=auto_transfer) - - @classmethod - def FromData(cls, stream, json_data, http, auto_transfer=None): - """Create a new Upload of stream from serialized json_data using http.""" - info = json.loads(json_data) - missing_keys = cls._REQUIRED_SERIALIZATION_KEYS - set(info.keys()) - if missing_keys: - raise exceptions.InvalidDataError( - 'Invalid serialization data, missing keys: %s' % ( - ', '.join(missing_keys))) - upload = cls.FromStream(stream, 
info['mime_type'], - total_size=info.get('total_size')) - if isinstance(stream, io.IOBase) and not stream.seekable(): - raise exceptions.InvalidUserInputError( - 'Cannot restart resumable upload on non-seekable stream') - if auto_transfer is not None: - upload.auto_transfer = auto_transfer - else: - upload.auto_transfer = info['auto_transfer'] - upload.strategy = _RESUMABLE_UPLOAD - upload._Initialize(http, info['url']) # pylint: disable=protected-access - upload._RefreshResumableUploadState() # pylint: disable=protected-access - upload.EnsureInitialized() - if upload.auto_transfer: - upload.StreamInChunks() - return upload - - @property - def serialization_data(self): - self.EnsureInitialized() - if self.strategy != _RESUMABLE_UPLOAD: - raise exceptions.InvalidDataError( - 'Serialization only supported for resumable uploads') - return { - 'auto_transfer': self.auto_transfer, - 'mime_type': self.mime_type, - 'total_size': self.total_size, - 'url': self.url, - } - - @property - def complete(self): - return self.__complete - - @property - def mime_type(self): - return self.__mime_type - - def __str__(self): - if not self.initialized: - return 'Upload (uninitialized)' - else: - return 'Upload with %d/%s bytes transferred for url %s' % ( - self.progress, self.total_size or '???', self.url) - - @property - def strategy(self): - return self.__strategy - - @strategy.setter - def strategy(self, value): - if value not in (_SIMPLE_UPLOAD, _RESUMABLE_UPLOAD): - raise exceptions.UserError(( - 'Invalid value "%s" for upload strategy, must be one of ' - '"simple" or "resumable".') % value) - self.__strategy = value - - @property - def total_size(self): - return self.__total_size - - @total_size.setter - def total_size(self, value): - self.EnsureUninitialized() - self.__total_size = value - - def __SetDefaultUploadStrategy(self, upload_config, http_request): - """Determine and set the default upload strategy for this upload. - - We generally prefer simple or multipart, unless we're forced to - use resumable. This happens when any of (1) the upload is too - large, (2) the simple endpoint doesn't support multipart requests - and we have metadata, or (3) there is no simple upload endpoint. - - Args: - upload_config: Configuration for the upload endpoint. - http_request: The associated http request. - - Returns: - None. - """ - if self.strategy is not None: - return - strategy = _SIMPLE_UPLOAD - if (self.total_size is not None and - self.total_size > _RESUMABLE_UPLOAD_THRESHOLD): - strategy = _RESUMABLE_UPLOAD - if http_request.body and not upload_config.simple_multipart: - strategy = _RESUMABLE_UPLOAD - if not upload_config.simple_path: - strategy = _RESUMABLE_UPLOAD - self.strategy = strategy - - def ConfigureRequest(self, upload_config, http_request, url_builder): - """Configure the request and url for this upload.""" - # Validate total_size vs. 
max_size - if (self.total_size and upload_config.max_size and - self.total_size > upload_config.max_size): - raise exceptions.InvalidUserInputError( - 'Upload too big: %s larger than max size %s' % ( - self.total_size, upload_config.max_size)) - # Validate mime type - if not util.AcceptableMimeType(upload_config.accept, self.mime_type): - raise exceptions.InvalidUserInputError( - 'MIME type %s does not match any accepted MIME ranges %s' % ( - self.mime_type, upload_config.accept)) - - self.__SetDefaultUploadStrategy(upload_config, http_request) - if self.strategy == _SIMPLE_UPLOAD: - url_builder.relative_path = upload_config.simple_path - if http_request.body: - url_builder.query_params['uploadType'] = 'multipart' - self.__ConfigureMultipartRequest(http_request) - else: - url_builder.query_params['uploadType'] = 'media' - self.__ConfigureMediaRequest(http_request) - else: - url_builder.relative_path = upload_config.resumable_path - url_builder.query_params['uploadType'] = 'resumable' - self.__ConfigureResumableRequest(http_request) - - def __ConfigureMediaRequest(self, http_request): - """Configure http_request as a simple request for this upload.""" - http_request.headers['content-type'] = self.mime_type - http_request.body = self.stream.read() - - def __ConfigureMultipartRequest(self, http_request): - """Configure http_request as a multipart request for this upload.""" - # This is a multipart/related upload. - msg_root = mime_multipart.MIMEMultipart('related') - # msg_root should not write out its own headers - setattr(msg_root, '_write_headers', lambda self: None) - - # attach the body as one part - msg = mime_nonmultipart.MIMENonMultipart( - *http_request.headers['content-type'].split('/')) - msg.set_payload(http_request.body) - msg_root.attach(msg) - - # attach the media as the second part - msg = mime_nonmultipart.MIMENonMultipart(*self.mime_type.split('/')) - msg['Content-Transfer-Encoding'] = 'binary' - msg.set_payload(self.stream.read()) - msg_root.attach(msg) - - # encode the body: note that we can't use `as_string`, because - # it plays games with `From ` lines. 
- fp = io.StringIO() - g = email_generator.Generator(fp, mangle_from_=False) - g.flatten(msg_root, unixfrom=False) - http_request.body = fp.getvalue() - - multipart_boundary = msg_root.get_boundary() - http_request.headers['content-type'] = ( - 'multipart/related; boundary=%r' % multipart_boundary) - - def __ConfigureResumableRequest(self, http_request): - http_request.headers['X-Upload-Content-Type'] = self.mime_type - if self.total_size is not None: - http_request.headers['X-Upload-Content-Length'] = str(self.total_size) - - def _RefreshResumableUploadState(self): - """Talk to the server and refresh the state of this resumable upload.""" - if self.strategy != _RESUMABLE_UPLOAD: - return - self.EnsureInitialized() - refresh_request = http_wrapper.Request( - url=self.url, http_method='PUT', headers={'Content-Range': 'bytes */*'}) - refresh_response = http_wrapper.MakeRequest( - self.http, refresh_request, redirections=0) - range_header = refresh_response.info.get( - 'Range', refresh_response.info.get('range')) - if refresh_response.status_code in (http_client.OK, http_client.CREATED): - self.__complete = True - elif refresh_response.status_code == http_wrapper.RESUME_INCOMPLETE: - if range_header is None: - self.__progress = 0 - else: - self.__progress = self.__GetLastByte(range_header) + 1 - self.stream.seek(self.progress) - else: - raise exceptions.HttpError.FromResponse(refresh_response) - - def InitializeUpload(self, http_request, http=None, client=None): - """Initialize this upload from the given http_request.""" - if self.strategy is None: - raise exceptions.UserError( - 'No upload strategy set; did you call ConfigureRequest?') - if http is None and client is None: - raise exceptions.UserError('Must provide client or http.') - if self.strategy != _RESUMABLE_UPLOAD: - return - if self.total_size is None: - raise exceptions.InvalidUserInputError( - 'Cannot stream upload without total size') - http = http or client.http - if client is not None: - http_request.url = client.FinalizeTransferUrl(http_request.url) - self.EnsureUninitialized() - http_response = http_wrapper.MakeRequest(http, http_request) - if http_response.status_code != http_client.OK: - raise exceptions.HttpError.FromResponse(http_response) - - self.__server_chunk_granularity = http_response.info.get( - 'X-Goog-Upload-Chunk-Granularity') - self.__ValidateChunksize() - url = http_response.info['location'] - if client is not None: - url = client.FinalizeTransferUrl(url) - self._Initialize(http, url) - - # Unless the user has requested otherwise, we want to just - # go ahead and pump the bytes now. - if self.auto_transfer: - return self.StreamInChunks() - - def __GetLastByte(self, range_header): - _, _, end = range_header.partition('-') - # TODO(craigcitro): Validate start == 0? 
- return int(end) - - def __ValidateChunksize(self, chunksize=None): - if self.__server_chunk_granularity is None: - return - chunksize = chunksize or self.chunksize - if chunksize % self.__server_chunk_granularity: - raise exceptions.ConfigurationValueError( - 'Server requires chunksize to be a multiple of %d', - self.__server_chunk_granularity) - - @staticmethod - def _ArgPrinter(response, unused_upload): - print('Sent %s' % response.info['range']) - - @staticmethod - def _CompletePrinter(*unused_args): - print('Upload complete') - - def StreamInChunks(self, callback=None, finish_callback=None, - additional_headers=None): - """Send this (resumable) upload in chunks.""" - if self.strategy != _RESUMABLE_UPLOAD: - raise exceptions.InvalidUserInputError( - 'Cannot stream non-resumable upload') - if self.total_size is None: - raise exceptions.InvalidUserInputError( - 'Cannot stream upload without total size') - callback = callback or self._ArgPrinter - finish_callback = finish_callback or self._CompletePrinter - response = None - self.__ValidateChunksize(self.chunksize) - self.EnsureInitialized() - while not self.complete: - response = self.__SendChunk(self.stream.tell(), - additional_headers=additional_headers) - if response.status_code in (http_client.OK, http_client.CREATED): - self.__complete = True - break - self.__progress = self.__GetLastByte(response.info['range']) - if self.progress + 1 != self.stream.tell(): - # TODO(craigcitro): Add a better way to recover here. - raise exceptions.CommunicationError( - 'Failed to transfer all bytes in chunk, upload paused at byte ' - '%d' % self.progress) - self._ExecuteCallback(callback, response) - self._ExecuteCallback(finish_callback, response) - return response - - def __SendChunk(self, start, additional_headers=None, data=None): - """Send the specified chunk.""" - self.EnsureInitialized() - if data is None: - data = self.stream.read(self.chunksize) - end = start + len(data) - - request = http_wrapper.Request(url=self.url, http_method='PUT', body=data) - request.headers['Content-Type'] = self.mime_type - if data: - request.headers['Content-Range'] = 'bytes %s-%s/%s' % ( - start, end - 1, self.total_size) - if additional_headers: - request.headers.update(additional_headers) - - response = http_wrapper.MakeRequest(self.bytes_http, request) - if response.status_code not in (http_client.OK, http_client.CREATED, - http_wrapper.RESUME_INCOMPLETE): - raise exceptions.HttpError.FromResponse(response) - if response.status_code in (http_client.OK, http_client.CREATED): - return response - # TODO(craigcitro): Add retries on no progress? 
- last_byte = self.__GetLastByte(response.info['range']) - if last_byte + 1 != end: - new_start = last_byte + 1 - start - response = self.__SendChunk(last_byte + 1, data=data[new_start:]) - return response diff --git a/_gcloud_vendor/apitools/base/py/util.py b/_gcloud_vendor/apitools/base/py/util.py deleted file mode 100644 index 3c3fff53768b..000000000000 --- a/_gcloud_vendor/apitools/base/py/util.py +++ /dev/null @@ -1,167 +0,0 @@ -#!/usr/bin/env python -"""Assorted utilities shared between parts of apitools.""" - -import collections -import os -import random - -import six -from six.moves import http_client -from six.moves.urllib.error import URLError -from six.moves.urllib.parse import quote -from six.moves.urllib.request import urlopen - -from _gcloud_vendor.apitools.base.py import exceptions - -__all__ = [ - 'DetectGae', - 'DetectGce', -] - -_RESERVED_URI_CHARS = r":/?#[]@!$&'()*+,;=" - - -def DetectGae(): - """Determine whether or not we're running on GAE. - - This is based on: - https://developers.google.com/appengine/docs/python/#The_Environment - - Returns: - True iff we're running on GAE. - """ - server_software = os.environ.get('SERVER_SOFTWARE', '') - return (server_software.startswith('Development/') or - server_software.startswith('Google App Engine/')) - - -def DetectGce(): - """Determine whether or not we're running on GCE. - - This is based on: - https://cloud.google.com/compute/docs/metadata#runninggce - - Returns: - True iff we're running on a GCE instance. - """ - try: - o = urlopen('http://metadata.google.internal') - except URLError: - return False - return (o.getcode() == http_client.OK and - o.headers.get('metadata-flavor') == 'Google') - - -def NormalizeScopes(scope_spec): - """Normalize scope_spec to a set of strings.""" - if isinstance(scope_spec, six.string_types): - return set(scope_spec.split(' ')) - elif isinstance(scope_spec, collections.Iterable): - return set(scope_spec) - raise exceptions.TypecheckError( - 'NormalizeScopes expected string or iterable, found %s' % ( - type(scope_spec),)) - - -def Typecheck(arg, arg_type, msg=None): - if not isinstance(arg, arg_type): - if msg is None: - if isinstance(arg_type, tuple): - msg = 'Type of arg is "%s", not one of %r' % (type(arg), arg_type) - else: - msg = 'Type of arg is "%s", not "%s"' % (type(arg), arg_type) - raise exceptions.TypecheckError(msg) - return arg - - -def ExpandRelativePath(method_config, params, relative_path=None): - """Determine the relative path for request.""" - path = relative_path or method_config.relative_path or '' - - for param in method_config.path_params: - param_template = '{%s}' % param - # For more details about "reserved word expansion", see: - # http://tools.ietf.org/html/rfc6570#section-3.2.2 - reserved_chars = '' - reserved_template = '{+%s}' % param - if reserved_template in path: - reserved_chars = _RESERVED_URI_CHARS - path = path.replace(reserved_template, param_template) - if param_template not in path: - raise exceptions.InvalidUserInputError( - 'Missing path parameter %s' % param) - try: - # TODO(craigcitro): Do we want to support some sophisticated - # mapping here? 
- value = params[param] - except KeyError: - raise exceptions.InvalidUserInputError( - 'Request missing required parameter %s' % param) - if value is None: - raise exceptions.InvalidUserInputError( - 'Request missing required parameter %s' % param) - try: - if not isinstance(value, six.string_types): - value = str(value) - path = path.replace(param_template, - quote(value.encode('utf_8'), reserved_chars)) - except TypeError as e: - raise exceptions.InvalidUserInputError( - 'Error setting required parameter %s to value %s: %s' % ( - param, value, e)) - return path - - -def CalculateWaitForRetry(retry_attempt, max_wait=60): - """Calculates amount of time to wait before a retry attempt. - - Wait time grows exponentially with the number of attempts. - A random amount of jitter is added to spread out retry attempts from different - clients. - - Args: - retry_attempt: Retry attempt counter. - max_wait: Upper bound for wait time. - - Returns: - Amount of time to wait before retrying request. - """ - - wait_time = 2 ** retry_attempt - # randrange requires a nonzero interval, so we want to drop it if - # the range is too small for jitter. - if retry_attempt: - max_jitter = (2 ** retry_attempt) / 2 - wait_time += random.randrange(-max_jitter, max_jitter) - return min(wait_time, max_wait) - - -def AcceptableMimeType(accept_patterns, mime_type): - """Return True iff mime_type is acceptable for one of accept_patterns. - - Note that this function assumes that all patterns in accept_patterns - will be simple types of the form "type/subtype", where one or both - of these can be "*". We do not support parameters (i.e. "; q=") in - patterns. - - Args: - accept_patterns: list of acceptable MIME types. - mime_type: the mime type we would like to match. - - Returns: - Whether or not mime_type matches (at least) one of these patterns. - """ - unsupported_patterns = [p for p in accept_patterns if ';' in p] - if unsupported_patterns: - raise exceptions.GeneratedClientError( - 'MIME patterns with parameter unsupported: "%s"' % ', '.join( - unsupported_patterns)) - def MimeTypeMatches(pattern, mime_type): - """Return True iff mime_type is acceptable for pattern.""" - # Some systems use a single '*' instead of '*/*'. - if pattern == '*': - pattern = '*/*' - return all(accept in ('*', provided) for accept, provided - in zip(pattern.split('/'), mime_type.split('/'))) - - return any(MimeTypeMatches(pattern, mime_type) for pattern in accept_patterns) diff --git a/gcloud/storage/_storage_v1_client.py b/gcloud/storage/_storage_v1_client.py new file mode 100644 index 000000000000..adf6f4f5ff1e --- /dev/null +++ b/gcloud/storage/_storage_v1_client.py @@ -0,0 +1,996 @@ +"""Generated client library for storage version v1.""" +# NOTE: This file is autogenerated and should not be edited by hand. 
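The generated ``StorageV1`` client added below is driven with request and
response message classes from ``gcloud.storage._storage_v1_messages``
(imported in the generated module as ``messages``). A minimal sketch of the
intended call pattern, assuming credentials are discovered by the apitools
base client and that the ``StorageBucketsGetRequest`` message accepts its
path parameter as a keyword argument (the bucket name is a placeholder)::

    from gcloud.storage._storage_v1_client import StorageV1
    from gcloud.storage import _storage_v1_messages as messages

    # get_credentials=True asks the apitools base client to resolve
    # credentials itself; pass credentials= or http= explicitly to override.
    client = StorageV1(get_credentials=True)

    # Build the request message for storage.buckets.get and call the
    # generated service method, which returns a Bucket message.
    request = messages.StorageBucketsGetRequest(bucket='example-bucket')
    bucket = client.buckets.Get(request)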
+from apitools.base.py import base_api +import gcloud.storage._storage_v1_messages as messages + + +class StorageV1(base_api.BaseApiClient): + """Generated client library for service storage version v1.""" + + MESSAGES_MODULE = messages + + _PACKAGE = u'storage' + _SCOPES = [u'https://www.googleapis.com/auth/cloud-platform', u'https://www.googleapis.com/auth/devstorage.full_control', u'https://www.googleapis.com/auth/devstorage.read_only', u'https://www.googleapis.com/auth/devstorage.read_write'] + _VERSION = u'v1' + _CLIENT_ID = '1042881264118.apps.googleusercontent.com' + _CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b' + _USER_AGENT = '' + _CLIENT_CLASS_NAME = u'StorageV1' + _URL_VERSION = u'v1' + _API_KEY = None + + def __init__(self, url='', credentials=None, + get_credentials=True, http=None, model=None, + log_request=False, log_response=False, + credentials_args=None, default_global_params=None, + additional_http_headers=None): + """Create a new storage handle.""" + url = url or u'https://www.googleapis.com/storage/v1/' + super(StorageV1, self).__init__( + url, credentials=credentials, + get_credentials=get_credentials, http=http, model=model, + log_request=log_request, log_response=log_response, + credentials_args=credentials_args, + default_global_params=default_global_params, + additional_http_headers=additional_http_headers) + self.bucketAccessControls = self.BucketAccessControlsService(self) + self.buckets = self.BucketsService(self) + self.channels = self.ChannelsService(self) + self.defaultObjectAccessControls = self.DefaultObjectAccessControlsService(self) + self.objectAccessControls = self.ObjectAccessControlsService(self) + self.objects = self.ObjectsService(self) + + class BucketAccessControlsService(base_api.BaseApiService): + """Service class for the bucketAccessControls resource.""" + + _NAME = u'bucketAccessControls' + + def __init__(self, client): + super(StorageV1.BucketAccessControlsService, self).__init__(client) + self._method_configs = { + 'Delete': base_api.ApiMethodInfo( + http_method=u'DELETE', + method_id=u'storage.bucketAccessControls.delete', + ordered_params=[u'bucket', u'entity'], + path_params=[u'bucket', u'entity'], + query_params=[], + relative_path=u'b/{bucket}/acl/{entity}', + request_field='', + request_type_name=u'StorageBucketAccessControlsDeleteRequest', + response_type_name=u'StorageBucketAccessControlsDeleteResponse', + supports_download=False, + ), + 'Get': base_api.ApiMethodInfo( + http_method=u'GET', + method_id=u'storage.bucketAccessControls.get', + ordered_params=[u'bucket', u'entity'], + path_params=[u'bucket', u'entity'], + query_params=[], + relative_path=u'b/{bucket}/acl/{entity}', + request_field='', + request_type_name=u'StorageBucketAccessControlsGetRequest', + response_type_name=u'BucketAccessControl', + supports_download=False, + ), + 'Insert': base_api.ApiMethodInfo( + http_method=u'POST', + method_id=u'storage.bucketAccessControls.insert', + ordered_params=[u'bucket'], + path_params=[u'bucket'], + query_params=[], + relative_path=u'b/{bucket}/acl', + request_field='', + request_type_name=u'BucketAccessControl', + response_type_name=u'BucketAccessControl', + supports_download=False, + ), + 'List': base_api.ApiMethodInfo( + http_method=u'GET', + method_id=u'storage.bucketAccessControls.list', + ordered_params=[u'bucket'], + path_params=[u'bucket'], + query_params=[], + relative_path=u'b/{bucket}/acl', + request_field='', + request_type_name=u'StorageBucketAccessControlsListRequest', + response_type_name=u'BucketAccessControls', + 
supports_download=False, + ), + 'Patch': base_api.ApiMethodInfo( + http_method=u'PATCH', + method_id=u'storage.bucketAccessControls.patch', + ordered_params=[u'bucket', u'entity'], + path_params=[u'bucket', u'entity'], + query_params=[], + relative_path=u'b/{bucket}/acl/{entity}', + request_field='', + request_type_name=u'BucketAccessControl', + response_type_name=u'BucketAccessControl', + supports_download=False, + ), + 'Update': base_api.ApiMethodInfo( + http_method=u'PUT', + method_id=u'storage.bucketAccessControls.update', + ordered_params=[u'bucket', u'entity'], + path_params=[u'bucket', u'entity'], + query_params=[], + relative_path=u'b/{bucket}/acl/{entity}', + request_field='', + request_type_name=u'BucketAccessControl', + response_type_name=u'BucketAccessControl', + supports_download=False, + ), + } + + self._upload_configs = { + } + + def Delete(self, request, global_params=None): + """Permanently deletes the ACL entry for the specified entity on the specified bucket. + + Args: + request: (StorageBucketAccessControlsDeleteRequest) input message + global_params: (StandardQueryParameters, default: None) global arguments + Returns: + (StorageBucketAccessControlsDeleteResponse) The response message. + """ + config = self.GetMethodConfig('Delete') + return self._RunMethod( + config, request, global_params=global_params) + + def Get(self, request, global_params=None): + """Returns the ACL entry for the specified entity on the specified bucket. + + Args: + request: (StorageBucketAccessControlsGetRequest) input message + global_params: (StandardQueryParameters, default: None) global arguments + Returns: + (BucketAccessControl) The response message. + """ + config = self.GetMethodConfig('Get') + return self._RunMethod( + config, request, global_params=global_params) + + def Insert(self, request, global_params=None): + """Creates a new ACL entry on the specified bucket. + + Args: + request: (BucketAccessControl) input message + global_params: (StandardQueryParameters, default: None) global arguments + Returns: + (BucketAccessControl) The response message. + """ + config = self.GetMethodConfig('Insert') + return self._RunMethod( + config, request, global_params=global_params) + + def List(self, request, global_params=None): + """Retrieves ACL entries on the specified bucket. + + Args: + request: (StorageBucketAccessControlsListRequest) input message + global_params: (StandardQueryParameters, default: None) global arguments + Returns: + (BucketAccessControls) The response message. + """ + config = self.GetMethodConfig('List') + return self._RunMethod( + config, request, global_params=global_params) + + def Patch(self, request, global_params=None): + """Updates an ACL entry on the specified bucket. This method supports patch semantics. + + Args: + request: (BucketAccessControl) input message + global_params: (StandardQueryParameters, default: None) global arguments + Returns: + (BucketAccessControl) The response message. + """ + config = self.GetMethodConfig('Patch') + return self._RunMethod( + config, request, global_params=global_params) + + def Update(self, request, global_params=None): + """Updates an ACL entry on the specified bucket. + + Args: + request: (BucketAccessControl) input message + global_params: (StandardQueryParameters, default: None) global arguments + Returns: + (BucketAccessControl) The response message. 
+ """ + config = self.GetMethodConfig('Update') + return self._RunMethod( + config, request, global_params=global_params) + + class BucketsService(base_api.BaseApiService): + """Service class for the buckets resource.""" + + _NAME = u'buckets' + + def __init__(self, client): + super(StorageV1.BucketsService, self).__init__(client) + self._method_configs = { + 'Delete': base_api.ApiMethodInfo( + http_method=u'DELETE', + method_id=u'storage.buckets.delete', + ordered_params=[u'bucket'], + path_params=[u'bucket'], + query_params=[u'ifMetagenerationMatch', u'ifMetagenerationNotMatch'], + relative_path=u'b/{bucket}', + request_field='', + request_type_name=u'StorageBucketsDeleteRequest', + response_type_name=u'StorageBucketsDeleteResponse', + supports_download=False, + ), + 'Get': base_api.ApiMethodInfo( + http_method=u'GET', + method_id=u'storage.buckets.get', + ordered_params=[u'bucket'], + path_params=[u'bucket'], + query_params=[u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'projection'], + relative_path=u'b/{bucket}', + request_field='', + request_type_name=u'StorageBucketsGetRequest', + response_type_name=u'Bucket', + supports_download=False, + ), + 'Insert': base_api.ApiMethodInfo( + http_method=u'POST', + method_id=u'storage.buckets.insert', + ordered_params=[u'project'], + path_params=[], + query_params=[u'predefinedAcl', u'predefinedDefaultObjectAcl', u'project', u'projection'], + relative_path=u'b', + request_field=u'bucket', + request_type_name=u'StorageBucketsInsertRequest', + response_type_name=u'Bucket', + supports_download=False, + ), + 'List': base_api.ApiMethodInfo( + http_method=u'GET', + method_id=u'storage.buckets.list', + ordered_params=[u'project'], + path_params=[], + query_params=[u'maxResults', u'pageToken', u'prefix', u'project', u'projection'], + relative_path=u'b', + request_field='', + request_type_name=u'StorageBucketsListRequest', + response_type_name=u'Buckets', + supports_download=False, + ), + 'Patch': base_api.ApiMethodInfo( + http_method=u'PATCH', + method_id=u'storage.buckets.patch', + ordered_params=[u'bucket'], + path_params=[u'bucket'], + query_params=[u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'predefinedAcl', u'predefinedDefaultObjectAcl', u'projection'], + relative_path=u'b/{bucket}', + request_field=u'bucketResource', + request_type_name=u'StorageBucketsPatchRequest', + response_type_name=u'Bucket', + supports_download=False, + ), + 'Update': base_api.ApiMethodInfo( + http_method=u'PUT', + method_id=u'storage.buckets.update', + ordered_params=[u'bucket'], + path_params=[u'bucket'], + query_params=[u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'predefinedAcl', u'predefinedDefaultObjectAcl', u'projection'], + relative_path=u'b/{bucket}', + request_field=u'bucketResource', + request_type_name=u'StorageBucketsUpdateRequest', + response_type_name=u'Bucket', + supports_download=False, + ), + } + + self._upload_configs = { + } + + def Delete(self, request, global_params=None): + """Permanently deletes an empty bucket. + + Args: + request: (StorageBucketsDeleteRequest) input message + global_params: (StandardQueryParameters, default: None) global arguments + Returns: + (StorageBucketsDeleteResponse) The response message. + """ + config = self.GetMethodConfig('Delete') + return self._RunMethod( + config, request, global_params=global_params) + + def Get(self, request, global_params=None): + """Returns metadata for the specified bucket. 
+ + Args: + request: (StorageBucketsGetRequest) input message + global_params: (StandardQueryParameters, default: None) global arguments + Returns: + (Bucket) The response message. + """ + config = self.GetMethodConfig('Get') + return self._RunMethod( + config, request, global_params=global_params) + + def Insert(self, request, global_params=None): + """Creates a new bucket. + + Args: + request: (StorageBucketsInsertRequest) input message + global_params: (StandardQueryParameters, default: None) global arguments + Returns: + (Bucket) The response message. + """ + config = self.GetMethodConfig('Insert') + return self._RunMethod( + config, request, global_params=global_params) + + def List(self, request, global_params=None): + """Retrieves a list of buckets for a given project. + + Args: + request: (StorageBucketsListRequest) input message + global_params: (StandardQueryParameters, default: None) global arguments + Returns: + (Buckets) The response message. + """ + config = self.GetMethodConfig('List') + return self._RunMethod( + config, request, global_params=global_params) + + def Patch(self, request, global_params=None): + """Updates a bucket. This method supports patch semantics. + + Args: + request: (StorageBucketsPatchRequest) input message + global_params: (StandardQueryParameters, default: None) global arguments + Returns: + (Bucket) The response message. + """ + config = self.GetMethodConfig('Patch') + return self._RunMethod( + config, request, global_params=global_params) + + def Update(self, request, global_params=None): + """Updates a bucket. + + Args: + request: (StorageBucketsUpdateRequest) input message + global_params: (StandardQueryParameters, default: None) global arguments + Returns: + (Bucket) The response message. + """ + config = self.GetMethodConfig('Update') + return self._RunMethod( + config, request, global_params=global_params) + + class ChannelsService(base_api.BaseApiService): + """Service class for the channels resource.""" + + _NAME = u'channels' + + def __init__(self, client): + super(StorageV1.ChannelsService, self).__init__(client) + self._method_configs = { + 'Stop': base_api.ApiMethodInfo( + http_method=u'POST', + method_id=u'storage.channels.stop', + ordered_params=[], + path_params=[], + query_params=[], + relative_path=u'channels/stop', + request_field='', + request_type_name=u'Channel', + response_type_name=u'StorageChannelsStopResponse', + supports_download=False, + ), + } + + self._upload_configs = { + } + + def Stop(self, request, global_params=None): + """Stop watching resources through this channel. + + Args: + request: (Channel) input message + global_params: (StandardQueryParameters, default: None) global arguments + Returns: + (StorageChannelsStopResponse) The response message. 
+ """ + config = self.GetMethodConfig('Stop') + return self._RunMethod( + config, request, global_params=global_params) + + class DefaultObjectAccessControlsService(base_api.BaseApiService): + """Service class for the defaultObjectAccessControls resource.""" + + _NAME = u'defaultObjectAccessControls' + + def __init__(self, client): + super(StorageV1.DefaultObjectAccessControlsService, self).__init__(client) + self._method_configs = { + 'Delete': base_api.ApiMethodInfo( + http_method=u'DELETE', + method_id=u'storage.defaultObjectAccessControls.delete', + ordered_params=[u'bucket', u'entity'], + path_params=[u'bucket', u'entity'], + query_params=[], + relative_path=u'b/{bucket}/defaultObjectAcl/{entity}', + request_field='', + request_type_name=u'StorageDefaultObjectAccessControlsDeleteRequest', + response_type_name=u'StorageDefaultObjectAccessControlsDeleteResponse', + supports_download=False, + ), + 'Get': base_api.ApiMethodInfo( + http_method=u'GET', + method_id=u'storage.defaultObjectAccessControls.get', + ordered_params=[u'bucket', u'entity'], + path_params=[u'bucket', u'entity'], + query_params=[], + relative_path=u'b/{bucket}/defaultObjectAcl/{entity}', + request_field='', + request_type_name=u'StorageDefaultObjectAccessControlsGetRequest', + response_type_name=u'ObjectAccessControl', + supports_download=False, + ), + 'Insert': base_api.ApiMethodInfo( + http_method=u'POST', + method_id=u'storage.defaultObjectAccessControls.insert', + ordered_params=[u'bucket'], + path_params=[u'bucket'], + query_params=[], + relative_path=u'b/{bucket}/defaultObjectAcl', + request_field='', + request_type_name=u'ObjectAccessControl', + response_type_name=u'ObjectAccessControl', + supports_download=False, + ), + 'List': base_api.ApiMethodInfo( + http_method=u'GET', + method_id=u'storage.defaultObjectAccessControls.list', + ordered_params=[u'bucket'], + path_params=[u'bucket'], + query_params=[u'ifMetagenerationMatch', u'ifMetagenerationNotMatch'], + relative_path=u'b/{bucket}/defaultObjectAcl', + request_field='', + request_type_name=u'StorageDefaultObjectAccessControlsListRequest', + response_type_name=u'ObjectAccessControls', + supports_download=False, + ), + 'Patch': base_api.ApiMethodInfo( + http_method=u'PATCH', + method_id=u'storage.defaultObjectAccessControls.patch', + ordered_params=[u'bucket', u'entity'], + path_params=[u'bucket', u'entity'], + query_params=[], + relative_path=u'b/{bucket}/defaultObjectAcl/{entity}', + request_field='', + request_type_name=u'ObjectAccessControl', + response_type_name=u'ObjectAccessControl', + supports_download=False, + ), + 'Update': base_api.ApiMethodInfo( + http_method=u'PUT', + method_id=u'storage.defaultObjectAccessControls.update', + ordered_params=[u'bucket', u'entity'], + path_params=[u'bucket', u'entity'], + query_params=[], + relative_path=u'b/{bucket}/defaultObjectAcl/{entity}', + request_field='', + request_type_name=u'ObjectAccessControl', + response_type_name=u'ObjectAccessControl', + supports_download=False, + ), + } + + self._upload_configs = { + } + + def Delete(self, request, global_params=None): + """Permanently deletes the default object ACL entry for the specified entity on the specified bucket. + + Args: + request: (StorageDefaultObjectAccessControlsDeleteRequest) input message + global_params: (StandardQueryParameters, default: None) global arguments + Returns: + (StorageDefaultObjectAccessControlsDeleteResponse) The response message. 
+ """ + config = self.GetMethodConfig('Delete') + return self._RunMethod( + config, request, global_params=global_params) + + def Get(self, request, global_params=None): + """Returns the default object ACL entry for the specified entity on the specified bucket. + + Args: + request: (StorageDefaultObjectAccessControlsGetRequest) input message + global_params: (StandardQueryParameters, default: None) global arguments + Returns: + (ObjectAccessControl) The response message. + """ + config = self.GetMethodConfig('Get') + return self._RunMethod( + config, request, global_params=global_params) + + def Insert(self, request, global_params=None): + """Creates a new default object ACL entry on the specified bucket. + + Args: + request: (ObjectAccessControl) input message + global_params: (StandardQueryParameters, default: None) global arguments + Returns: + (ObjectAccessControl) The response message. + """ + config = self.GetMethodConfig('Insert') + return self._RunMethod( + config, request, global_params=global_params) + + def List(self, request, global_params=None): + """Retrieves default object ACL entries on the specified bucket. + + Args: + request: (StorageDefaultObjectAccessControlsListRequest) input message + global_params: (StandardQueryParameters, default: None) global arguments + Returns: + (ObjectAccessControls) The response message. + """ + config = self.GetMethodConfig('List') + return self._RunMethod( + config, request, global_params=global_params) + + def Patch(self, request, global_params=None): + """Updates a default object ACL entry on the specified bucket. This method supports patch semantics. + + Args: + request: (ObjectAccessControl) input message + global_params: (StandardQueryParameters, default: None) global arguments + Returns: + (ObjectAccessControl) The response message. + """ + config = self.GetMethodConfig('Patch') + return self._RunMethod( + config, request, global_params=global_params) + + def Update(self, request, global_params=None): + """Updates a default object ACL entry on the specified bucket. + + Args: + request: (ObjectAccessControl) input message + global_params: (StandardQueryParameters, default: None) global arguments + Returns: + (ObjectAccessControl) The response message. 
+ """ + config = self.GetMethodConfig('Update') + return self._RunMethod( + config, request, global_params=global_params) + + class ObjectAccessControlsService(base_api.BaseApiService): + """Service class for the objectAccessControls resource.""" + + _NAME = u'objectAccessControls' + + def __init__(self, client): + super(StorageV1.ObjectAccessControlsService, self).__init__(client) + self._method_configs = { + 'Delete': base_api.ApiMethodInfo( + http_method=u'DELETE', + method_id=u'storage.objectAccessControls.delete', + ordered_params=[u'bucket', u'object', u'entity'], + path_params=[u'bucket', u'entity', u'object'], + query_params=[u'generation'], + relative_path=u'b/{bucket}/o/{object}/acl/{entity}', + request_field='', + request_type_name=u'StorageObjectAccessControlsDeleteRequest', + response_type_name=u'StorageObjectAccessControlsDeleteResponse', + supports_download=False, + ), + 'Get': base_api.ApiMethodInfo( + http_method=u'GET', + method_id=u'storage.objectAccessControls.get', + ordered_params=[u'bucket', u'object', u'entity'], + path_params=[u'bucket', u'entity', u'object'], + query_params=[u'generation'], + relative_path=u'b/{bucket}/o/{object}/acl/{entity}', + request_field='', + request_type_name=u'StorageObjectAccessControlsGetRequest', + response_type_name=u'ObjectAccessControl', + supports_download=False, + ), + 'Insert': base_api.ApiMethodInfo( + http_method=u'POST', + method_id=u'storage.objectAccessControls.insert', + ordered_params=[u'bucket', u'object'], + path_params=[u'bucket', u'object'], + query_params=[u'generation'], + relative_path=u'b/{bucket}/o/{object}/acl', + request_field=u'objectAccessControl', + request_type_name=u'StorageObjectAccessControlsInsertRequest', + response_type_name=u'ObjectAccessControl', + supports_download=False, + ), + 'List': base_api.ApiMethodInfo( + http_method=u'GET', + method_id=u'storage.objectAccessControls.list', + ordered_params=[u'bucket', u'object'], + path_params=[u'bucket', u'object'], + query_params=[u'generation'], + relative_path=u'b/{bucket}/o/{object}/acl', + request_field='', + request_type_name=u'StorageObjectAccessControlsListRequest', + response_type_name=u'ObjectAccessControls', + supports_download=False, + ), + 'Patch': base_api.ApiMethodInfo( + http_method=u'PATCH', + method_id=u'storage.objectAccessControls.patch', + ordered_params=[u'bucket', u'object', u'entity'], + path_params=[u'bucket', u'entity', u'object'], + query_params=[u'generation'], + relative_path=u'b/{bucket}/o/{object}/acl/{entity}', + request_field=u'objectAccessControl', + request_type_name=u'StorageObjectAccessControlsPatchRequest', + response_type_name=u'ObjectAccessControl', + supports_download=False, + ), + 'Update': base_api.ApiMethodInfo( + http_method=u'PUT', + method_id=u'storage.objectAccessControls.update', + ordered_params=[u'bucket', u'object', u'entity'], + path_params=[u'bucket', u'entity', u'object'], + query_params=[u'generation'], + relative_path=u'b/{bucket}/o/{object}/acl/{entity}', + request_field=u'objectAccessControl', + request_type_name=u'StorageObjectAccessControlsUpdateRequest', + response_type_name=u'ObjectAccessControl', + supports_download=False, + ), + } + + self._upload_configs = { + } + + def Delete(self, request, global_params=None): + """Permanently deletes the ACL entry for the specified entity on the specified object. 
+ + Args: + request: (StorageObjectAccessControlsDeleteRequest) input message + global_params: (StandardQueryParameters, default: None) global arguments + Returns: + (StorageObjectAccessControlsDeleteResponse) The response message. + """ + config = self.GetMethodConfig('Delete') + return self._RunMethod( + config, request, global_params=global_params) + + def Get(self, request, global_params=None): + """Returns the ACL entry for the specified entity on the specified object. + + Args: + request: (StorageObjectAccessControlsGetRequest) input message + global_params: (StandardQueryParameters, default: None) global arguments + Returns: + (ObjectAccessControl) The response message. + """ + config = self.GetMethodConfig('Get') + return self._RunMethod( + config, request, global_params=global_params) + + def Insert(self, request, global_params=None): + """Creates a new ACL entry on the specified object. + + Args: + request: (StorageObjectAccessControlsInsertRequest) input message + global_params: (StandardQueryParameters, default: None) global arguments + Returns: + (ObjectAccessControl) The response message. + """ + config = self.GetMethodConfig('Insert') + return self._RunMethod( + config, request, global_params=global_params) + + def List(self, request, global_params=None): + """Retrieves ACL entries on the specified object. + + Args: + request: (StorageObjectAccessControlsListRequest) input message + global_params: (StandardQueryParameters, default: None) global arguments + Returns: + (ObjectAccessControls) The response message. + """ + config = self.GetMethodConfig('List') + return self._RunMethod( + config, request, global_params=global_params) + + def Patch(self, request, global_params=None): + """Updates an ACL entry on the specified object. This method supports patch semantics. + + Args: + request: (StorageObjectAccessControlsPatchRequest) input message + global_params: (StandardQueryParameters, default: None) global arguments + Returns: + (ObjectAccessControl) The response message. + """ + config = self.GetMethodConfig('Patch') + return self._RunMethod( + config, request, global_params=global_params) + + def Update(self, request, global_params=None): + """Updates an ACL entry on the specified object. + + Args: + request: (StorageObjectAccessControlsUpdateRequest) input message + global_params: (StandardQueryParameters, default: None) global arguments + Returns: + (ObjectAccessControl) The response message. 
+ """ + config = self.GetMethodConfig('Update') + return self._RunMethod( + config, request, global_params=global_params) + + class ObjectsService(base_api.BaseApiService): + """Service class for the objects resource.""" + + _NAME = u'objects' + + def __init__(self, client): + super(StorageV1.ObjectsService, self).__init__(client) + self._method_configs = { + 'Compose': base_api.ApiMethodInfo( + http_method=u'POST', + method_id=u'storage.objects.compose', + ordered_params=[u'destinationBucket', u'destinationObject'], + path_params=[u'destinationBucket', u'destinationObject'], + query_params=[u'destinationPredefinedAcl', u'encryptionAlgorithm', u'encryptionKey', u'encryptionKeyHash', u'ifGenerationMatch', u'ifMetagenerationMatch'], + relative_path=u'b/{destinationBucket}/o/{destinationObject}/compose', + request_field=u'composeRequest', + request_type_name=u'StorageObjectsComposeRequest', + response_type_name=u'Object', + supports_download=True, + ), + 'Copy': base_api.ApiMethodInfo( + http_method=u'POST', + method_id=u'storage.objects.copy', + ordered_params=[u'sourceBucket', u'sourceObject', u'destinationBucket', u'destinationObject'], + path_params=[u'destinationBucket', u'destinationObject', u'sourceBucket', u'sourceObject'], + query_params=[u'destinationPredefinedAcl', u'encryptionAlgorithm', u'encryptionKey', u'encryptionKeyHash', u'ifGenerationMatch', u'ifGenerationNotMatch', u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'ifSourceGenerationMatch', u'ifSourceGenerationNotMatch', u'ifSourceMetagenerationMatch', u'ifSourceMetagenerationNotMatch', u'projection', u'sourceGeneration'], + relative_path=u'b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}', + request_field=u'object', + request_type_name=u'StorageObjectsCopyRequest', + response_type_name=u'Object', + supports_download=True, + ), + 'Delete': base_api.ApiMethodInfo( + http_method=u'DELETE', + method_id=u'storage.objects.delete', + ordered_params=[u'bucket', u'object'], + path_params=[u'bucket', u'object'], + query_params=[u'generation', u'ifGenerationMatch', u'ifGenerationNotMatch', u'ifMetagenerationMatch', u'ifMetagenerationNotMatch'], + relative_path=u'b/{bucket}/o/{object}', + request_field='', + request_type_name=u'StorageObjectsDeleteRequest', + response_type_name=u'StorageObjectsDeleteResponse', + supports_download=False, + ), + 'Get': base_api.ApiMethodInfo( + http_method=u'GET', + method_id=u'storage.objects.get', + ordered_params=[u'bucket', u'object'], + path_params=[u'bucket', u'object'], + query_params=[u'encryptionAlgorithm', u'encryptionKey', u'encryptionKeyHash', u'generation', u'ifGenerationMatch', u'ifGenerationNotMatch', u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'projection'], + relative_path=u'b/{bucket}/o/{object}', + request_field='', + request_type_name=u'StorageObjectsGetRequest', + response_type_name=u'Object', + supports_download=True, + ), + 'Insert': base_api.ApiMethodInfo( + http_method=u'POST', + method_id=u'storage.objects.insert', + ordered_params=[u'bucket'], + path_params=[u'bucket'], + query_params=[u'contentEncoding', u'encryptionAlgorithm', u'encryptionKey', u'encryptionKeyHash', u'ifGenerationMatch', u'ifGenerationNotMatch', u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'name', u'predefinedAcl', u'projection'], + relative_path=u'b/{bucket}/o', + request_field=u'object', + request_type_name=u'StorageObjectsInsertRequest', + response_type_name=u'Object', + supports_download=True, + ), + 'List': base_api.ApiMethodInfo( + 
http_method=u'GET', + method_id=u'storage.objects.list', + ordered_params=[u'bucket'], + path_params=[u'bucket'], + query_params=[u'delimiter', u'maxResults', u'pageToken', u'prefix', u'projection', u'versions'], + relative_path=u'b/{bucket}/o', + request_field='', + request_type_name=u'StorageObjectsListRequest', + response_type_name=u'Objects', + supports_download=False, + ), + 'Patch': base_api.ApiMethodInfo( + http_method=u'PATCH', + method_id=u'storage.objects.patch', + ordered_params=[u'bucket', u'object'], + path_params=[u'bucket', u'object'], + query_params=[u'encryptionAlgorithm', u'encryptionKey', u'encryptionKeyHash', u'generation', u'ifGenerationMatch', u'ifGenerationNotMatch', u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'predefinedAcl', u'projection'], + relative_path=u'b/{bucket}/o/{object}', + request_field=u'objectResource', + request_type_name=u'StorageObjectsPatchRequest', + response_type_name=u'Object', + supports_download=False, + ), + 'Update': base_api.ApiMethodInfo( + http_method=u'PUT', + method_id=u'storage.objects.update', + ordered_params=[u'bucket', u'object'], + path_params=[u'bucket', u'object'], + query_params=[u'encryptionAlgorithm', u'encryptionKey', u'encryptionKeyHash', u'generation', u'ifGenerationMatch', u'ifGenerationNotMatch', u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'predefinedAcl', u'projection'], + relative_path=u'b/{bucket}/o/{object}', + request_field=u'objectResource', + request_type_name=u'StorageObjectsUpdateRequest', + response_type_name=u'Object', + supports_download=True, + ), + 'WatchAll': base_api.ApiMethodInfo( + http_method=u'POST', + method_id=u'storage.objects.watchAll', + ordered_params=[u'bucket'], + path_params=[u'bucket'], + query_params=[u'delimiter', u'maxResults', u'pageToken', u'prefix', u'projection', u'versions'], + relative_path=u'b/{bucket}/o/watch', + request_field=u'channel', + request_type_name=u'StorageObjectsWatchAllRequest', + response_type_name=u'Channel', + supports_download=False, + ), + } + + self._upload_configs = { + 'Insert': base_api.ApiUploadInfo( + accept=['*/*'], + max_size=None, + resumable_multipart=True, + resumable_path=u'/resumable/upload/storage/v1/b/{bucket}/o', + simple_multipart=True, + simple_path=u'/upload/storage/v1/b/{bucket}/o', + ), + } + + def Compose(self, request, global_params=None, download=None): + """Concatenates a list of existing objects into a new object in the same bucket. + + Args: + request: (StorageObjectsComposeRequest) input message + global_params: (StandardQueryParameters, default: None) global arguments + download: (Download, default: None) If present, download + data from the request via this stream. + Returns: + (Object) The response message. + """ + config = self.GetMethodConfig('Compose') + return self._RunMethod( + config, request, global_params=global_params, + download=download) + + def Copy(self, request, global_params=None, download=None): + """Copies an object to a specified location. Optionally overrides metadata. + + Args: + request: (StorageObjectsCopyRequest) input message + global_params: (StandardQueryParameters, default: None) global arguments + download: (Download, default: None) If present, download + data from the request via this stream. + Returns: + (Object) The response message. + """ + config = self.GetMethodConfig('Copy') + return self._RunMethod( + config, request, global_params=global_params, + download=download) + + def Delete(self, request, global_params=None): + """Deletes an object and its metadata. 
Deletions are permanent if versioning is not enabled for the bucket, or if the generation parameter is used. + + Args: + request: (StorageObjectsDeleteRequest) input message + global_params: (StandardQueryParameters, default: None) global arguments + Returns: + (StorageObjectsDeleteResponse) The response message. + """ + config = self.GetMethodConfig('Delete') + return self._RunMethod( + config, request, global_params=global_params) + + def Get(self, request, global_params=None, download=None): + """Retrieves an object or its metadata. + + Args: + request: (StorageObjectsGetRequest) input message + global_params: (StandardQueryParameters, default: None) global arguments + download: (Download, default: None) If present, download + data from the request via this stream. + Returns: + (Object) The response message. + """ + config = self.GetMethodConfig('Get') + return self._RunMethod( + config, request, global_params=global_params, + download=download) + + def Insert(self, request, global_params=None, upload=None, download=None): + """Stores a new object and metadata. + + Args: + request: (StorageObjectsInsertRequest) input message + global_params: (StandardQueryParameters, default: None) global arguments + upload: (Upload, default: None) If present, upload + this stream with the request. + download: (Download, default: None) If present, download + data from the request via this stream. + Returns: + (Object) The response message. + """ + config = self.GetMethodConfig('Insert') + upload_config = self.GetUploadConfig('Insert') + return self._RunMethod( + config, request, global_params=global_params, + upload=upload, upload_config=upload_config, + download=download) + + def List(self, request, global_params=None): + """Retrieves a list of objects matching the criteria. + + Args: + request: (StorageObjectsListRequest) input message + global_params: (StandardQueryParameters, default: None) global arguments + Returns: + (Objects) The response message. + """ + config = self.GetMethodConfig('List') + return self._RunMethod( + config, request, global_params=global_params) + + def Patch(self, request, global_params=None): + """Updates an object's metadata. This method supports patch semantics. + + Args: + request: (StorageObjectsPatchRequest) input message + global_params: (StandardQueryParameters, default: None) global arguments + Returns: + (Object) The response message. + """ + config = self.GetMethodConfig('Patch') + return self._RunMethod( + config, request, global_params=global_params) + + def Update(self, request, global_params=None, download=None): + """Updates an object's metadata. + + Args: + request: (StorageObjectsUpdateRequest) input message + global_params: (StandardQueryParameters, default: None) global arguments + download: (Download, default: None) If present, download + data from the request via this stream. + Returns: + (Object) The response message. + """ + config = self.GetMethodConfig('Update') + return self._RunMethod( + config, request, global_params=global_params, + download=download) + + def WatchAll(self, request, global_params=None): + """Watch for changes on all objects in a bucket. + + Args: + request: (StorageObjectsWatchAllRequest) input message + global_params: (StandardQueryParameters, default: None) global arguments + Returns: + (Channel) The response message. 
+ """ + config = self.GetMethodConfig('WatchAll') + return self._RunMethod( + config, request, global_params=global_params) diff --git a/gcloud/storage/_storage_v1_messages.py b/gcloud/storage/_storage_v1_messages.py new file mode 100644 index 000000000000..68651adad303 --- /dev/null +++ b/gcloud/storage/_storage_v1_messages.py @@ -0,0 +1,1795 @@ +"""Generated message classes for storage version v1. + +Lets you store and retrieve potentially-large, immutable data objects. +""" +# NOTE: This file is autogenerated and should not be edited by hand. + +from apitools.base.py import encoding +from apitools.base.py import extra_types +from protorpc import message_types +from protorpc import messages + + +package = 'storage' + + +class Bucket(messages.Message): + """A bucket. + + Messages: + CorsValueListEntry: A CorsValueListEntry object. + LifecycleValue: The bucket's lifecycle configuration. See lifecycle + management for more information. + LoggingValue: The bucket's logging configuration, which defines the + destination bucket and optional name prefix for the current bucket's + logs. + OwnerValue: The owner of the bucket. This is always the project team's + owner group. + VersioningValue: The bucket's versioning configuration. + WebsiteValue: The bucket's website configuration. + + Fields: + acl: Access controls on the bucket. + cors: The bucket's Cross-Origin Resource Sharing (CORS) configuration. + defaultObjectAcl: Default access controls to apply to new objects when no + ACL is provided. + etag: HTTP 1.1 Entity tag for the bucket. + id: The ID of the bucket. + kind: The kind of item this is. For buckets, this is always + storage#bucket. + lifecycle: The bucket's lifecycle configuration. See lifecycle management + for more information. + location: The location of the bucket. Object data for objects in the + bucket resides in physical storage within this region. Defaults to US. + See the developer's guide for the authoritative list. + logging: The bucket's logging configuration, which defines the destination + bucket and optional name prefix for the current bucket's logs. + metageneration: The metadata generation of this bucket. + name: The name of the bucket. + owner: The owner of the bucket. This is always the project team's owner + group. + projectNumber: The project number of the project the bucket belongs to. + selfLink: The URI of this bucket. + storageClass: The bucket's storage class. This defines how objects in the + bucket are stored and determines the SLA and the cost of storage. Values + include STANDARD, NEARLINE and DURABLE_REDUCED_AVAILABILITY. Defaults to + STANDARD. For more information, see storage classes. + timeCreated: Creation time of the bucket in RFC 3339 format. + versioning: The bucket's versioning configuration. + website: The bucket's website configuration. + """ + + class CorsValueListEntry(messages.Message): + """A CorsValueListEntry object. + + Fields: + maxAgeSeconds: The value, in seconds, to return in the Access-Control- + Max-Age header used in preflight responses. + method: The list of HTTP methods on which to include CORS response + headers, (GET, OPTIONS, POST, etc) Note: "*" is permitted in the list + of methods, and means "any method". + origin: The list of Origins eligible to receive CORS response headers. + Note: "*" is permitted in the list of origins, and means "any Origin". + responseHeader: The list of HTTP headers other than the simple response + headers to give permission for the user-agent to share across domains. 
+ """ + + maxAgeSeconds = messages.IntegerField(1, variant=messages.Variant.INT32) + method = messages.StringField(2, repeated=True) + origin = messages.StringField(3, repeated=True) + responseHeader = messages.StringField(4, repeated=True) + + class LifecycleValue(messages.Message): + """The bucket's lifecycle configuration. See lifecycle management for more + information. + + Messages: + RuleValueListEntry: A RuleValueListEntry object. + + Fields: + rule: A lifecycle management rule, which is made of an action to take + and the condition(s) under which the action will be taken. + """ + + class RuleValueListEntry(messages.Message): + """A RuleValueListEntry object. + + Messages: + ActionValue: The action to take. + ConditionValue: The condition(s) under which the action will be taken. + + Fields: + action: The action to take. + condition: The condition(s) under which the action will be taken. + """ + + class ActionValue(messages.Message): + """The action to take. + + Fields: + type: Type of the action. Currently, only Delete is supported. + """ + + type = messages.StringField(1) + + class ConditionValue(messages.Message): + """The condition(s) under which the action will be taken. + + Fields: + age: Age of an object (in days). This condition is satisfied when an + object reaches the specified age. + createdBefore: A date in RFC 3339 format with only the date part + (for instance, "2013-01-15"). This condition is satisfied when an + object is created before midnight of the specified date in UTC. + isLive: Relevant only for versioned objects. If the value is true, + this condition matches live objects; if the value is false, it + matches archived objects. + numNewerVersions: Relevant only for versioned objects. If the value + is N, this condition is satisfied when there are at least N + versions (including the live version) newer than this version of + the object. + """ + + age = messages.IntegerField(1, variant=messages.Variant.INT32) + createdBefore = extra_types.DateField(2) + isLive = messages.BooleanField(3) + numNewerVersions = messages.IntegerField(4, variant=messages.Variant.INT32) + + action = messages.MessageField('ActionValue', 1) + condition = messages.MessageField('ConditionValue', 2) + + rule = messages.MessageField('RuleValueListEntry', 1, repeated=True) + + class LoggingValue(messages.Message): + """The bucket's logging configuration, which defines the destination + bucket and optional name prefix for the current bucket's logs. + + Fields: + logBucket: The destination bucket where the current bucket's logs should + be placed. + logObjectPrefix: A prefix for log object names. + """ + + logBucket = messages.StringField(1) + logObjectPrefix = messages.StringField(2) + + class OwnerValue(messages.Message): + """The owner of the bucket. This is always the project team's owner group. + + Fields: + entity: The entity, in the form project-owner-projectId. + entityId: The ID for the entity. + """ + + entity = messages.StringField(1) + entityId = messages.StringField(2) + + class VersioningValue(messages.Message): + """The bucket's versioning configuration. + + Fields: + enabled: While set to true, versioning is fully enabled for this bucket. + """ + + enabled = messages.BooleanField(1) + + class WebsiteValue(messages.Message): + """The bucket's website configuration. + + Fields: + mainPageSuffix: Behaves as the bucket's directory index where missing + objects are treated as potential directories. + notFoundPage: The custom object to return when a requested resource is + not found. 
+ """ + + mainPageSuffix = messages.StringField(1) + notFoundPage = messages.StringField(2) + + acl = messages.MessageField('BucketAccessControl', 1, repeated=True) + cors = messages.MessageField('CorsValueListEntry', 2, repeated=True) + defaultObjectAcl = messages.MessageField('ObjectAccessControl', 3, repeated=True) + etag = messages.StringField(4) + id = messages.StringField(5) + kind = messages.StringField(6, default=u'storage#bucket') + lifecycle = messages.MessageField('LifecycleValue', 7) + location = messages.StringField(8) + logging = messages.MessageField('LoggingValue', 9) + metageneration = messages.IntegerField(10) + name = messages.StringField(11) + owner = messages.MessageField('OwnerValue', 12) + projectNumber = messages.IntegerField(13, variant=messages.Variant.UINT64) + selfLink = messages.StringField(14) + storageClass = messages.StringField(15) + timeCreated = message_types.DateTimeField(16) + versioning = messages.MessageField('VersioningValue', 17) + website = messages.MessageField('WebsiteValue', 18) + + +class BucketAccessControl(messages.Message): + """An access-control entry. + + Messages: + ProjectTeamValue: The project team associated with the entity, if any. + + Fields: + bucket: The name of the bucket. + domain: The domain associated with the entity, if any. + email: The email address associated with the entity, if any. + entity: The entity holding the permission, in one of the following forms: + - user-userId - user-email - group-groupId - group-email - domain- + domain - project-team-projectId - allUsers - allAuthenticatedUsers + Examples: - The user liz@example.com would be user-liz@example.com. - + The group example@googlegroups.com would be group- + example@googlegroups.com. - To refer to all members of the Google Apps + for Business domain example.com, the entity would be domain-example.com. + entityId: The ID for the entity, if any. + etag: HTTP 1.1 Entity tag for the access-control entry. + id: The ID of the access-control entry. + kind: The kind of item this is. For bucket access control entries, this is + always storage#bucketAccessControl. + projectTeam: The project team associated with the entity, if any. + role: The access permission for the entity. Can be READER, WRITER, or + OWNER. + selfLink: The link to this access-control entry. + """ + + class ProjectTeamValue(messages.Message): + """The project team associated with the entity, if any. + + Fields: + projectNumber: The project number. + team: The team. Can be owners, editors, or viewers. + """ + + projectNumber = messages.StringField(1) + team = messages.StringField(2) + + bucket = messages.StringField(1) + domain = messages.StringField(2) + email = messages.StringField(3) + entity = messages.StringField(4) + entityId = messages.StringField(5) + etag = messages.StringField(6) + id = messages.StringField(7) + kind = messages.StringField(8, default=u'storage#bucketAccessControl') + projectTeam = messages.MessageField('ProjectTeamValue', 9) + role = messages.StringField(10) + selfLink = messages.StringField(11) + + +class BucketAccessControls(messages.Message): + """An access-control list. + + Fields: + items: The list of items. + kind: The kind of item this is. For lists of bucket access control + entries, this is always storage#bucketAccessControls. + """ + + items = messages.MessageField('BucketAccessControl', 1, repeated=True) + kind = messages.StringField(2, default=u'storage#bucketAccessControls') + + +class Buckets(messages.Message): + """A list of buckets. 
+ + Fields: + items: The list of items. + kind: The kind of item this is. For lists of buckets, this is always + storage#buckets. + nextPageToken: The continuation token, used to page through large result + sets. Provide this value in a subsequent request to return the next page + of results. + """ + + items = messages.MessageField('Bucket', 1, repeated=True) + kind = messages.StringField(2, default=u'storage#buckets') + nextPageToken = messages.StringField(3) + + +class Channel(messages.Message): + """An notification channel used to watch for resource changes. + + Messages: + ParamsValue: Additional parameters controlling delivery channel behavior. + Optional. + + Fields: + address: The address where notifications are delivered for this channel. + expiration: Date and time of notification channel expiration, expressed as + a Unix timestamp, in milliseconds. Optional. + id: A UUID or similar unique string that identifies this channel. + kind: Identifies this as a notification channel used to watch for changes + to a resource. Value: the fixed string "api#channel". + params: Additional parameters controlling delivery channel behavior. + Optional. + payload: A Boolean value to indicate whether payload is wanted. Optional. + resourceId: An opaque ID that identifies the resource being watched on + this channel. Stable across different API versions. + resourceUri: A version-specific identifier for the watched resource. + token: An arbitrary string delivered to the target address with each + notification delivered over this channel. Optional. + type: The type of delivery mechanism used for this channel. + """ + + @encoding.MapUnrecognizedFields('additionalProperties') + class ParamsValue(messages.Message): + """Additional parameters controlling delivery channel behavior. Optional. + + Messages: + AdditionalProperty: An additional property for a ParamsValue object. + + Fields: + additionalProperties: Declares a new parameter by name. + """ + + class AdditionalProperty(messages.Message): + """An additional property for a ParamsValue object. + + Fields: + key: Name of the additional property. + value: A string attribute. + """ + + key = messages.StringField(1) + value = messages.StringField(2) + + additionalProperties = messages.MessageField('AdditionalProperty', 1, repeated=True) + + address = messages.StringField(1) + expiration = messages.IntegerField(2) + id = messages.StringField(3) + kind = messages.StringField(4, default=u'api#channel') + params = messages.MessageField('ParamsValue', 5) + payload = messages.BooleanField(6) + resourceId = messages.StringField(7) + resourceUri = messages.StringField(8) + token = messages.StringField(9) + type = messages.StringField(10) + + +class ComposeRequest(messages.Message): + """A Compose request. + + Messages: + SourceObjectsValueListEntry: A SourceObjectsValueListEntry object. + + Fields: + destination: Properties of the resulting object. + kind: The kind of item this is. + sourceObjects: The list of source objects that will be concatenated into a + single object. + """ + + class SourceObjectsValueListEntry(messages.Message): + """A SourceObjectsValueListEntry object. + + Messages: + ObjectPreconditionsValue: Conditions that must be met for this operation + to execute. + + Fields: + generation: The generation of this object to use as the source. + name: The source object's name. The source object's bucket is implicitly + the destination bucket. + objectPreconditions: Conditions that must be met for this operation to + execute. 
+ """ + + class ObjectPreconditionsValue(messages.Message): + """Conditions that must be met for this operation to execute. + + Fields: + ifGenerationMatch: Only perform the composition if the generation of + the source object that would be used matches this value. If this + value and a generation are both specified, they must be the same + value or the call will fail. + """ + + ifGenerationMatch = messages.IntegerField(1) + + generation = messages.IntegerField(1) + name = messages.StringField(2) + objectPreconditions = messages.MessageField('ObjectPreconditionsValue', 3) + + destination = messages.MessageField('Object', 1) + kind = messages.StringField(2, default=u'storage#composeRequest') + sourceObjects = messages.MessageField('SourceObjectsValueListEntry', 3, repeated=True) + + +class Object(messages.Message): + """An object. + + Messages: + MetadataValue: User-provided metadata, in key/value pairs. + OwnerValue: The owner of the object. This will always be the uploader of + the object. + + Fields: + acl: Access controls on the object. + bucket: The name of the bucket containing this object. + cacheControl: Cache-Control directive for the object data. + componentCount: Number of underlying components that make up this object. + Components are accumulated by compose operations. + contentDisposition: Content-Disposition of the object data. + contentEncoding: Content-Encoding of the object data. + contentLanguage: Content-Language of the object data. + contentType: Content-Type of the object data. + crc32c: CRC32c checksum, as described in RFC 4960, Appendix B; encoded + using base64. + etag: HTTP 1.1 Entity tag for the object. + generation: The content generation of this object. Used for object + versioning. + id: The ID of the object. + kind: The kind of item this is. For objects, this is always + storage#object. + md5Hash: MD5 hash of the data; encoded using base64. + mediaLink: Media download link. + metadata: User-provided metadata, in key/value pairs. + metageneration: The version of the metadata for this object at this + generation. Used for preconditions and for detecting changes in + metadata. A metageneration number is only meaningful in the context of a + particular generation of a particular object. + name: The name of this object. Required if not specified by URL parameter. + owner: The owner of the object. This will always be the uploader of the + object. + selfLink: The link to this object. + size: Content-Length of the data in bytes. + storageClass: Storage class of the object. + timeDeleted: The deletion time of the object in RFC 3339 format. Will be + returned if and only if this version of the object has been deleted. + updated: The creation or modification time of the object in RFC 3339 + format. For buckets with versioning enabled, changing an object's + metadata does not change this property. + """ + + @encoding.MapUnrecognizedFields('additionalProperties') + class MetadataValue(messages.Message): + """User-provided metadata, in key/value pairs. + + Messages: + AdditionalProperty: An additional property for a MetadataValue object. + + Fields: + additionalProperties: An individual metadata entry. + """ + + class AdditionalProperty(messages.Message): + """An additional property for a MetadataValue object. + + Fields: + key: Name of the additional property. + value: A string attribute. 
+ """ + + key = messages.StringField(1) + value = messages.StringField(2) + + additionalProperties = messages.MessageField('AdditionalProperty', 1, repeated=True) + + class OwnerValue(messages.Message): + """The owner of the object. This will always be the uploader of the + object. + + Fields: + entity: The entity, in the form user-userId. + entityId: The ID for the entity. + """ + + entity = messages.StringField(1) + entityId = messages.StringField(2) + + acl = messages.MessageField('ObjectAccessControl', 1, repeated=True) + bucket = messages.StringField(2) + cacheControl = messages.StringField(3) + componentCount = messages.IntegerField(4, variant=messages.Variant.INT32) + contentDisposition = messages.StringField(5) + contentEncoding = messages.StringField(6) + contentLanguage = messages.StringField(7) + contentType = messages.StringField(8) + crc32c = messages.StringField(9) + etag = messages.StringField(10) + generation = messages.IntegerField(11) + id = messages.StringField(12) + kind = messages.StringField(13, default=u'storage#object') + md5Hash = messages.StringField(14) + mediaLink = messages.StringField(15) + metadata = messages.MessageField('MetadataValue', 16) + metageneration = messages.IntegerField(17) + name = messages.StringField(18) + owner = messages.MessageField('OwnerValue', 19) + selfLink = messages.StringField(20) + size = messages.IntegerField(21, variant=messages.Variant.UINT64) + storageClass = messages.StringField(22) + timeDeleted = message_types.DateTimeField(23) + updated = message_types.DateTimeField(24) + + +class ObjectAccessControl(messages.Message): + """An access-control entry. + + Messages: + ProjectTeamValue: The project team associated with the entity, if any. + + Fields: + bucket: The name of the bucket. + domain: The domain associated with the entity, if any. + email: The email address associated with the entity, if any. + entity: The entity holding the permission, in one of the following forms: + - user-userId - user-email - group-groupId - group-email - domain- + domain - project-team-projectId - allUsers - allAuthenticatedUsers + Examples: - The user liz@example.com would be user-liz@example.com. - + The group example@googlegroups.com would be group- + example@googlegroups.com. - To refer to all members of the Google Apps + for Business domain example.com, the entity would be domain-example.com. + entityId: The ID for the entity, if any. + etag: HTTP 1.1 Entity tag for the access-control entry. + generation: The content generation of the object. + id: The ID of the access-control entry. + kind: The kind of item this is. For object access control entries, this is + always storage#objectAccessControl. + object: The name of the object. + projectTeam: The project team associated with the entity, if any. + role: The access permission for the entity. Can be READER or OWNER. + selfLink: The link to this access-control entry. + """ + + class ProjectTeamValue(messages.Message): + """The project team associated with the entity, if any. + + Fields: + projectNumber: The project number. + team: The team. Can be owners, editors, or viewers. 
+ """ + + projectNumber = messages.StringField(1) + team = messages.StringField(2) + + bucket = messages.StringField(1) + domain = messages.StringField(2) + email = messages.StringField(3) + entity = messages.StringField(4) + entityId = messages.StringField(5) + etag = messages.StringField(6) + generation = messages.IntegerField(7) + id = messages.StringField(8) + kind = messages.StringField(9, default=u'storage#objectAccessControl') + object = messages.StringField(10) + projectTeam = messages.MessageField('ProjectTeamValue', 11) + role = messages.StringField(12) + selfLink = messages.StringField(13) + + +class ObjectAccessControls(messages.Message): + """An access-control list. + + Fields: + items: The list of items. + kind: The kind of item this is. For lists of object access control + entries, this is always storage#objectAccessControls. + """ + + items = messages.MessageField('extra_types.JsonValue', 1, repeated=True) + kind = messages.StringField(2, default=u'storage#objectAccessControls') + + +class Objects(messages.Message): + """A list of objects. + + Fields: + items: The list of items. + kind: The kind of item this is. For lists of objects, this is always + storage#objects. + nextPageToken: The continuation token, used to page through large result + sets. Provide this value in a subsequent request to return the next page + of results. + prefixes: The list of prefixes of objects matching-but-not-listed up to + and including the requested delimiter. + """ + + items = messages.MessageField('Object', 1, repeated=True) + kind = messages.StringField(2, default=u'storage#objects') + nextPageToken = messages.StringField(3) + prefixes = messages.StringField(4, repeated=True) + + +class StandardQueryParameters(messages.Message): + """Query parameters accepted by all methods. + + Enums: + AltValueValuesEnum: Data format for the response. + + Fields: + alt: Data format for the response. + fields: Selector specifying which fields to include in a partial response. + key: API key. Your API key identifies your project and provides you with + API access, quota, and reports. Required unless you provide an OAuth 2.0 + token. + oauth_token: OAuth 2.0 token for the current user. + prettyPrint: Returns response with indentations and line breaks. + quotaUser: Available to use for quota purposes for server-side + applications. Can be any arbitrary string assigned to a user, but should + not exceed 40 characters. Overrides userIp if both are provided. + trace: A tracing token of the form "token:" to include in api + requests. + userIp: IP address of the site where the request originates. Use this if + you want to enforce per-user limits. + """ + + class AltValueValuesEnum(messages.Enum): + """Data format for the response. + + Values: + json: Responses with Content-Type of application/json + """ + json = 0 + + alt = messages.EnumField('AltValueValuesEnum', 1, default=u'json') + fields = messages.StringField(2) + key = messages.StringField(3) + oauth_token = messages.StringField(4) + prettyPrint = messages.BooleanField(5, default=True) + quotaUser = messages.StringField(6) + trace = messages.StringField(7) + userIp = messages.StringField(8) + + +class StorageBucketAccessControlsDeleteRequest(messages.Message): + """A StorageBucketAccessControlsDeleteRequest object. + + Fields: + bucket: Name of a bucket. + entity: The entity holding the permission. Can be user-userId, user- + emailAddress, group-groupId, group-emailAddress, allUsers, or + allAuthenticatedUsers. 
+ """ + + bucket = messages.StringField(1, required=True) + entity = messages.StringField(2, required=True) + + +class StorageBucketAccessControlsDeleteResponse(messages.Message): + """An empty StorageBucketAccessControlsDelete response.""" + + +class StorageBucketAccessControlsGetRequest(messages.Message): + """A StorageBucketAccessControlsGetRequest object. + + Fields: + bucket: Name of a bucket. + entity: The entity holding the permission. Can be user-userId, user- + emailAddress, group-groupId, group-emailAddress, allUsers, or + allAuthenticatedUsers. + """ + + bucket = messages.StringField(1, required=True) + entity = messages.StringField(2, required=True) + + +class StorageBucketAccessControlsListRequest(messages.Message): + """A StorageBucketAccessControlsListRequest object. + + Fields: + bucket: Name of a bucket. + """ + + bucket = messages.StringField(1, required=True) + + +class StorageBucketsDeleteRequest(messages.Message): + """A StorageBucketsDeleteRequest object. + + Fields: + bucket: Name of a bucket. + ifMetagenerationMatch: If set, only deletes the bucket if its + metageneration matches this value. + ifMetagenerationNotMatch: If set, only deletes the bucket if its + metageneration does not match this value. + """ + + bucket = messages.StringField(1, required=True) + ifMetagenerationMatch = messages.IntegerField(2) + ifMetagenerationNotMatch = messages.IntegerField(3) + + +class StorageBucketsDeleteResponse(messages.Message): + """An empty StorageBucketsDelete response.""" + + +class StorageBucketsGetRequest(messages.Message): + """A StorageBucketsGetRequest object. + + Enums: + ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl. + + Fields: + bucket: Name of a bucket. + ifMetagenerationMatch: Makes the return of the bucket metadata conditional + on whether the bucket's current metageneration matches the given value. + ifMetagenerationNotMatch: Makes the return of the bucket metadata + conditional on whether the bucket's current metageneration does not + match the given value. + projection: Set of properties to return. Defaults to noAcl. + """ + + class ProjectionValueValuesEnum(messages.Enum): + """Set of properties to return. Defaults to noAcl. + + Values: + full: Include all properties. + noAcl: Omit acl and defaultObjectAcl properties. + """ + full = 0 + noAcl = 1 + + bucket = messages.StringField(1, required=True) + ifMetagenerationMatch = messages.IntegerField(2) + ifMetagenerationNotMatch = messages.IntegerField(3) + projection = messages.EnumField('ProjectionValueValuesEnum', 4) + + +class StorageBucketsInsertRequest(messages.Message): + """A StorageBucketsInsertRequest object. + + Enums: + PredefinedAclValueValuesEnum: Apply a predefined set of access controls to + this bucket. + PredefinedDefaultObjectAclValueValuesEnum: Apply a predefined set of + default object access controls to this bucket. + ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl, + unless the bucket resource specifies acl or defaultObjectAcl properties, + when it defaults to full. + + Fields: + bucket: A Bucket resource to be passed as the request body. + predefinedAcl: Apply a predefined set of access controls to this bucket. + predefinedDefaultObjectAcl: Apply a predefined set of default object + access controls to this bucket. + project: A valid API project identifier. + projection: Set of properties to return. Defaults to noAcl, unless the + bucket resource specifies acl or defaultObjectAcl properties, when it + defaults to full. 
+ """ + + class PredefinedAclValueValuesEnum(messages.Enum): + """Apply a predefined set of access controls to this bucket. + + Values: + authenticatedRead: Project team owners get OWNER access, and + allAuthenticatedUsers get READER access. + private: Project team owners get OWNER access. + projectPrivate: Project team members get access according to their + roles. + publicRead: Project team owners get OWNER access, and allUsers get + READER access. + publicReadWrite: Project team owners get OWNER access, and allUsers get + WRITER access. + """ + authenticatedRead = 0 + private = 1 + projectPrivate = 2 + publicRead = 3 + publicReadWrite = 4 + + class PredefinedDefaultObjectAclValueValuesEnum(messages.Enum): + """Apply a predefined set of default object access controls to this + bucket. + + Values: + authenticatedRead: Object owner gets OWNER access, and + allAuthenticatedUsers get READER access. + bucketOwnerFullControl: Object owner gets OWNER access, and project team + owners get OWNER access. + bucketOwnerRead: Object owner gets OWNER access, and project team owners + get READER access. + private: Object owner gets OWNER access. + projectPrivate: Object owner gets OWNER access, and project team members + get access according to their roles. + publicRead: Object owner gets OWNER access, and allUsers get READER + access. + """ + authenticatedRead = 0 + bucketOwnerFullControl = 1 + bucketOwnerRead = 2 + private = 3 + projectPrivate = 4 + publicRead = 5 + + class ProjectionValueValuesEnum(messages.Enum): + """Set of properties to return. Defaults to noAcl, unless the bucket + resource specifies acl or defaultObjectAcl properties, when it defaults to + full. + + Values: + full: Include all properties. + noAcl: Omit acl and defaultObjectAcl properties. + """ + full = 0 + noAcl = 1 + + bucket = messages.MessageField('Bucket', 1) + predefinedAcl = messages.EnumField('PredefinedAclValueValuesEnum', 2) + predefinedDefaultObjectAcl = messages.EnumField('PredefinedDefaultObjectAclValueValuesEnum', 3) + project = messages.StringField(4, required=True) + projection = messages.EnumField('ProjectionValueValuesEnum', 5) + + +class StorageBucketsListRequest(messages.Message): + """A StorageBucketsListRequest object. + + Enums: + ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl. + + Fields: + maxResults: Maximum number of buckets to return. + pageToken: A previously-returned page token representing part of the + larger set of results to view. + prefix: Filter results to buckets whose names begin with this prefix. + project: A valid API project identifier. + projection: Set of properties to return. Defaults to noAcl. + """ + + class ProjectionValueValuesEnum(messages.Enum): + """Set of properties to return. Defaults to noAcl. + + Values: + full: Include all properties. + noAcl: Omit acl and defaultObjectAcl properties. + """ + full = 0 + noAcl = 1 + + maxResults = messages.IntegerField(1, variant=messages.Variant.UINT32) + pageToken = messages.StringField(2) + prefix = messages.StringField(3) + project = messages.StringField(4, required=True) + projection = messages.EnumField('ProjectionValueValuesEnum', 5) + + +class StorageBucketsPatchRequest(messages.Message): + """A StorageBucketsPatchRequest object. + + Enums: + PredefinedAclValueValuesEnum: Apply a predefined set of access controls to + this bucket. + PredefinedDefaultObjectAclValueValuesEnum: Apply a predefined set of + default object access controls to this bucket. + ProjectionValueValuesEnum: Set of properties to return. 
Defaults to full. + + Fields: + bucket: Name of a bucket. + bucketResource: A Bucket resource to be passed as the request body. + ifMetagenerationMatch: Makes the return of the bucket metadata conditional + on whether the bucket's current metageneration matches the given value. + ifMetagenerationNotMatch: Makes the return of the bucket metadata + conditional on whether the bucket's current metageneration does not + match the given value. + predefinedAcl: Apply a predefined set of access controls to this bucket. + predefinedDefaultObjectAcl: Apply a predefined set of default object + access controls to this bucket. + projection: Set of properties to return. Defaults to full. + """ + + class PredefinedAclValueValuesEnum(messages.Enum): + """Apply a predefined set of access controls to this bucket. + + Values: + authenticatedRead: Project team owners get OWNER access, and + allAuthenticatedUsers get READER access. + private: Project team owners get OWNER access. + projectPrivate: Project team members get access according to their + roles. + publicRead: Project team owners get OWNER access, and allUsers get + READER access. + publicReadWrite: Project team owners get OWNER access, and allUsers get + WRITER access. + """ + authenticatedRead = 0 + private = 1 + projectPrivate = 2 + publicRead = 3 + publicReadWrite = 4 + + class PredefinedDefaultObjectAclValueValuesEnum(messages.Enum): + """Apply a predefined set of default object access controls to this + bucket. + + Values: + authenticatedRead: Object owner gets OWNER access, and + allAuthenticatedUsers get READER access. + bucketOwnerFullControl: Object owner gets OWNER access, and project team + owners get OWNER access. + bucketOwnerRead: Object owner gets OWNER access, and project team owners + get READER access. + private: Object owner gets OWNER access. + projectPrivate: Object owner gets OWNER access, and project team members + get access according to their roles. + publicRead: Object owner gets OWNER access, and allUsers get READER + access. + """ + authenticatedRead = 0 + bucketOwnerFullControl = 1 + bucketOwnerRead = 2 + private = 3 + projectPrivate = 4 + publicRead = 5 + + class ProjectionValueValuesEnum(messages.Enum): + """Set of properties to return. Defaults to full. + + Values: + full: Include all properties. + noAcl: Omit acl and defaultObjectAcl properties. + """ + full = 0 + noAcl = 1 + + bucket = messages.StringField(1, required=True) + bucketResource = messages.MessageField('Bucket', 2) + ifMetagenerationMatch = messages.IntegerField(3) + ifMetagenerationNotMatch = messages.IntegerField(4) + predefinedAcl = messages.EnumField('PredefinedAclValueValuesEnum', 5) + predefinedDefaultObjectAcl = messages.EnumField('PredefinedDefaultObjectAclValueValuesEnum', 6) + projection = messages.EnumField('ProjectionValueValuesEnum', 7) + + +class StorageBucketsUpdateRequest(messages.Message): + """A StorageBucketsUpdateRequest object. + + Enums: + PredefinedAclValueValuesEnum: Apply a predefined set of access controls to + this bucket. + PredefinedDefaultObjectAclValueValuesEnum: Apply a predefined set of + default object access controls to this bucket. + ProjectionValueValuesEnum: Set of properties to return. Defaults to full. + + Fields: + bucket: Name of a bucket. + bucketResource: A Bucket resource to be passed as the request body. + ifMetagenerationMatch: Makes the return of the bucket metadata conditional + on whether the bucket's current metageneration matches the given value. 
+ ifMetagenerationNotMatch: Makes the return of the bucket metadata + conditional on whether the bucket's current metageneration does not + match the given value. + predefinedAcl: Apply a predefined set of access controls to this bucket. + predefinedDefaultObjectAcl: Apply a predefined set of default object + access controls to this bucket. + projection: Set of properties to return. Defaults to full. + """ + + class PredefinedAclValueValuesEnum(messages.Enum): + """Apply a predefined set of access controls to this bucket. + + Values: + authenticatedRead: Project team owners get OWNER access, and + allAuthenticatedUsers get READER access. + private: Project team owners get OWNER access. + projectPrivate: Project team members get access according to their + roles. + publicRead: Project team owners get OWNER access, and allUsers get + READER access. + publicReadWrite: Project team owners get OWNER access, and allUsers get + WRITER access. + """ + authenticatedRead = 0 + private = 1 + projectPrivate = 2 + publicRead = 3 + publicReadWrite = 4 + + class PredefinedDefaultObjectAclValueValuesEnum(messages.Enum): + """Apply a predefined set of default object access controls to this + bucket. + + Values: + authenticatedRead: Object owner gets OWNER access, and + allAuthenticatedUsers get READER access. + bucketOwnerFullControl: Object owner gets OWNER access, and project team + owners get OWNER access. + bucketOwnerRead: Object owner gets OWNER access, and project team owners + get READER access. + private: Object owner gets OWNER access. + projectPrivate: Object owner gets OWNER access, and project team members + get access according to their roles. + publicRead: Object owner gets OWNER access, and allUsers get READER + access. + """ + authenticatedRead = 0 + bucketOwnerFullControl = 1 + bucketOwnerRead = 2 + private = 3 + projectPrivate = 4 + publicRead = 5 + + class ProjectionValueValuesEnum(messages.Enum): + """Set of properties to return. Defaults to full. + + Values: + full: Include all properties. + noAcl: Omit acl and defaultObjectAcl properties. + """ + full = 0 + noAcl = 1 + + bucket = messages.StringField(1, required=True) + bucketResource = messages.MessageField('Bucket', 2) + ifMetagenerationMatch = messages.IntegerField(3) + ifMetagenerationNotMatch = messages.IntegerField(4) + predefinedAcl = messages.EnumField('PredefinedAclValueValuesEnum', 5) + predefinedDefaultObjectAcl = messages.EnumField('PredefinedDefaultObjectAclValueValuesEnum', 6) + projection = messages.EnumField('ProjectionValueValuesEnum', 7) + + +class StorageChannelsStopResponse(messages.Message): + """An empty StorageChannelsStop response.""" + + +class StorageDefaultObjectAccessControlsDeleteRequest(messages.Message): + """A StorageDefaultObjectAccessControlsDeleteRequest object. + + Fields: + bucket: Name of a bucket. + entity: The entity holding the permission. Can be user-userId, user- + emailAddress, group-groupId, group-emailAddress, allUsers, or + allAuthenticatedUsers. + """ + + bucket = messages.StringField(1, required=True) + entity = messages.StringField(2, required=True) + + +class StorageDefaultObjectAccessControlsDeleteResponse(messages.Message): + """An empty StorageDefaultObjectAccessControlsDelete response.""" + + +class StorageDefaultObjectAccessControlsGetRequest(messages.Message): + """A StorageDefaultObjectAccessControlsGetRequest object. + + Fields: + bucket: Name of a bucket. + entity: The entity holding the permission. 
Can be user-userId, user- + emailAddress, group-groupId, group-emailAddress, allUsers, or + allAuthenticatedUsers. + """ + + bucket = messages.StringField(1, required=True) + entity = messages.StringField(2, required=True) + + +class StorageDefaultObjectAccessControlsListRequest(messages.Message): + """A StorageDefaultObjectAccessControlsListRequest object. + + Fields: + bucket: Name of a bucket. + ifMetagenerationMatch: If present, only return default ACL listing if the + bucket's current metageneration matches this value. + ifMetagenerationNotMatch: If present, only return default ACL listing if + the bucket's current metageneration does not match the given value. + """ + + bucket = messages.StringField(1, required=True) + ifMetagenerationMatch = messages.IntegerField(2) + ifMetagenerationNotMatch = messages.IntegerField(3) + + +class StorageObjectAccessControlsDeleteRequest(messages.Message): + """A StorageObjectAccessControlsDeleteRequest object. + + Fields: + bucket: Name of a bucket. + entity: The entity holding the permission. Can be user-userId, user- + emailAddress, group-groupId, group-emailAddress, allUsers, or + allAuthenticatedUsers. + generation: If present, selects a specific revision of this object (as + opposed to the latest version, the default). + object: Name of the object. + """ + + bucket = messages.StringField(1, required=True) + entity = messages.StringField(2, required=True) + generation = messages.IntegerField(3) + object = messages.StringField(4, required=True) + + +class StorageObjectAccessControlsDeleteResponse(messages.Message): + """An empty StorageObjectAccessControlsDelete response.""" + + +class StorageObjectAccessControlsGetRequest(messages.Message): + """A StorageObjectAccessControlsGetRequest object. + + Fields: + bucket: Name of a bucket. + entity: The entity holding the permission. Can be user-userId, user- + emailAddress, group-groupId, group-emailAddress, allUsers, or + allAuthenticatedUsers. + generation: If present, selects a specific revision of this object (as + opposed to the latest version, the default). + object: Name of the object. + """ + + bucket = messages.StringField(1, required=True) + entity = messages.StringField(2, required=True) + generation = messages.IntegerField(3) + object = messages.StringField(4, required=True) + + +class StorageObjectAccessControlsInsertRequest(messages.Message): + """A StorageObjectAccessControlsInsertRequest object. + + Fields: + bucket: Name of a bucket. + generation: If present, selects a specific revision of this object (as + opposed to the latest version, the default). + object: Name of the object. + objectAccessControl: A ObjectAccessControl resource to be passed as the + request body. + """ + + bucket = messages.StringField(1, required=True) + generation = messages.IntegerField(2) + object = messages.StringField(3, required=True) + objectAccessControl = messages.MessageField('ObjectAccessControl', 4) + + +class StorageObjectAccessControlsListRequest(messages.Message): + """A StorageObjectAccessControlsListRequest object. + + Fields: + bucket: Name of a bucket. + generation: If present, selects a specific revision of this object (as + opposed to the latest version, the default). + object: Name of the object. + """ + + bucket = messages.StringField(1, required=True) + generation = messages.IntegerField(2) + object = messages.StringField(3, required=True) + + +class StorageObjectAccessControlsPatchRequest(messages.Message): + """A StorageObjectAccessControlsPatchRequest object. 
+ + Fields: + bucket: Name of a bucket. + entity: The entity holding the permission. Can be user-userId, user- + emailAddress, group-groupId, group-emailAddress, allUsers, or + allAuthenticatedUsers. + generation: If present, selects a specific revision of this object (as + opposed to the latest version, the default). + object: Name of the object. + objectAccessControl: A ObjectAccessControl resource to be passed as the + request body. + """ + + bucket = messages.StringField(1, required=True) + entity = messages.StringField(2, required=True) + generation = messages.IntegerField(3) + object = messages.StringField(4, required=True) + objectAccessControl = messages.MessageField('ObjectAccessControl', 5) + + +class StorageObjectAccessControlsUpdateRequest(messages.Message): + """A StorageObjectAccessControlsUpdateRequest object. + + Fields: + bucket: Name of a bucket. + entity: The entity holding the permission. Can be user-userId, user- + emailAddress, group-groupId, group-emailAddress, allUsers, or + allAuthenticatedUsers. + generation: If present, selects a specific revision of this object (as + opposed to the latest version, the default). + object: Name of the object. + objectAccessControl: A ObjectAccessControl resource to be passed as the + request body. + """ + + bucket = messages.StringField(1, required=True) + entity = messages.StringField(2, required=True) + generation = messages.IntegerField(3) + object = messages.StringField(4, required=True) + objectAccessControl = messages.MessageField('ObjectAccessControl', 5) + + +class StorageObjectsComposeRequest(messages.Message): + """A StorageObjectsComposeRequest object. + + Enums: + DestinationPredefinedAclValueValuesEnum: Apply a predefined set of access + controls to the destination object. + + Fields: + composeRequest: A ComposeRequest resource to be passed as the request + body. + destinationBucket: Name of the bucket in which to store the new object. + destinationObject: Name of the new object. + destinationPredefinedAcl: Apply a predefined set of access controls to the + destination object. + encryptionAlgorithm: Specifies the encryption algorithm that was used to + encrypt the object, if any. Only 'AES256' is supported currently. + Algorithm, key, and key hash must be supplied together. + encryptionKey: Provides a base64-encoded 256-bit key that was used to + encrypt the object, if any. Algorithm, key, and key hash must be + supplied together. + encryptionKeyHash: Provides the digest of the key for error-checking + transmission. A digest is in the format of '='. Algorithm, key, and key + hash must be supplied together. + ifGenerationMatch: Makes the operation conditional on whether the object's + current generation matches the given value. + ifMetagenerationMatch: Makes the operation conditional on whether the + object's current metageneration matches the given value. + """ + + class DestinationPredefinedAclValueValuesEnum(messages.Enum): + """Apply a predefined set of access controls to the destination object. + + Values: + authenticatedRead: Object owner gets OWNER access, and + allAuthenticatedUsers get READER access. + bucketOwnerFullControl: Object owner gets OWNER access, and project team + owners get OWNER access. + bucketOwnerRead: Object owner gets OWNER access, and project team owners + get READER access. + private: Object owner gets OWNER access. + projectPrivate: Object owner gets OWNER access, and project team members + get access according to their roles. 
+ publicRead: Object owner gets OWNER access, and allUsers get READER + access. + """ + authenticatedRead = 0 + bucketOwnerFullControl = 1 + bucketOwnerRead = 2 + private = 3 + projectPrivate = 4 + publicRead = 5 + + composeRequest = messages.MessageField('ComposeRequest', 1) + destinationBucket = messages.StringField(2, required=True) + destinationObject = messages.StringField(3, required=True) + destinationPredefinedAcl = messages.EnumField('DestinationPredefinedAclValueValuesEnum', 4) + encryptionAlgorithm = messages.StringField(5) + encryptionKey = messages.StringField(6) + encryptionKeyHash = messages.StringField(7) + ifGenerationMatch = messages.IntegerField(8) + ifMetagenerationMatch = messages.IntegerField(9) + + +class StorageObjectsCopyRequest(messages.Message): + """A StorageObjectsCopyRequest object. + + Enums: + DestinationPredefinedAclValueValuesEnum: Apply a predefined set of access + controls to the destination object. + ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl, + unless the object resource specifies the acl property, when it defaults + to full. + + Fields: + destinationBucket: Name of the bucket in which to store the new object. + Overrides the provided object metadata's bucket value, if any. + destinationObject: Name of the new object. Required when the object + metadata is not otherwise provided. Overrides the object metadata's name + value, if any. + destinationPredefinedAcl: Apply a predefined set of access controls to the + destination object. + encryptionAlgorithm: Specifies the encryption algorithm that was used to + encrypt the object, if any. Only 'AES256' is supported currently. + Algorithm, key, and key hash must be supplied together. + encryptionKey: Provides a base64-encoded 256-bit key that was used to + encrypt the object, if any. Algorithm, key, and key hash must be + supplied together. + encryptionKeyHash: Provides the digest of the key for error-checking + transmission. A digest is in the format of '='. Algorithm, key, and key + hash must be supplied together. + ifGenerationMatch: Makes the operation conditional on whether the + destination object's current generation matches the given value. + ifGenerationNotMatch: Makes the operation conditional on whether the + destination object's current generation does not match the given value. + ifMetagenerationMatch: Makes the operation conditional on whether the + destination object's current metageneration matches the given value. + ifMetagenerationNotMatch: Makes the operation conditional on whether the + destination object's current metageneration does not match the given + value. + ifSourceGenerationMatch: Makes the operation conditional on whether the + source object's generation matches the given value. + ifSourceGenerationNotMatch: Makes the operation conditional on whether the + source object's generation does not match the given value. + ifSourceMetagenerationMatch: Makes the operation conditional on whether + the source object's current metageneration matches the given value. + ifSourceMetagenerationNotMatch: Makes the operation conditional on whether + the source object's current metageneration does not match the given + value. + object: A Object resource to be passed as the request body. + projection: Set of properties to return. Defaults to noAcl, unless the + object resource specifies the acl property, when it defaults to full. + sourceBucket: Name of the bucket in which to find the source object. 
+ sourceGeneration: If present, selects a specific revision of the source + object (as opposed to the latest version, the default). + sourceObject: Name of the source object. + """ + + class DestinationPredefinedAclValueValuesEnum(messages.Enum): + """Apply a predefined set of access controls to the destination object. + + Values: + authenticatedRead: Object owner gets OWNER access, and + allAuthenticatedUsers get READER access. + bucketOwnerFullControl: Object owner gets OWNER access, and project team + owners get OWNER access. + bucketOwnerRead: Object owner gets OWNER access, and project team owners + get READER access. + private: Object owner gets OWNER access. + projectPrivate: Object owner gets OWNER access, and project team members + get access according to their roles. + publicRead: Object owner gets OWNER access, and allUsers get READER + access. + """ + authenticatedRead = 0 + bucketOwnerFullControl = 1 + bucketOwnerRead = 2 + private = 3 + projectPrivate = 4 + publicRead = 5 + + class ProjectionValueValuesEnum(messages.Enum): + """Set of properties to return. Defaults to noAcl, unless the object + resource specifies the acl property, when it defaults to full. + + Values: + full: Include all properties. + noAcl: Omit the acl property. + """ + full = 0 + noAcl = 1 + + destinationBucket = messages.StringField(1, required=True) + destinationObject = messages.StringField(2, required=True) + destinationPredefinedAcl = messages.EnumField('DestinationPredefinedAclValueValuesEnum', 3) + encryptionAlgorithm = messages.StringField(4) + encryptionKey = messages.StringField(5) + encryptionKeyHash = messages.StringField(6) + ifGenerationMatch = messages.IntegerField(7) + ifGenerationNotMatch = messages.IntegerField(8) + ifMetagenerationMatch = messages.IntegerField(9) + ifMetagenerationNotMatch = messages.IntegerField(10) + ifSourceGenerationMatch = messages.IntegerField(11) + ifSourceGenerationNotMatch = messages.IntegerField(12) + ifSourceMetagenerationMatch = messages.IntegerField(13) + ifSourceMetagenerationNotMatch = messages.IntegerField(14) + object = messages.MessageField('Object', 15) + projection = messages.EnumField('ProjectionValueValuesEnum', 16) + sourceBucket = messages.StringField(17, required=True) + sourceGeneration = messages.IntegerField(18) + sourceObject = messages.StringField(19, required=True) + + +class StorageObjectsDeleteRequest(messages.Message): + """A StorageObjectsDeleteRequest object. + + Fields: + bucket: Name of the bucket in which the object resides. + generation: If present, permanently deletes a specific revision of this + object (as opposed to the latest version, the default). + ifGenerationMatch: Makes the operation conditional on whether the object's + current generation matches the given value. + ifGenerationNotMatch: Makes the operation conditional on whether the + object's current generation does not match the given value. + ifMetagenerationMatch: Makes the operation conditional on whether the + object's current metageneration matches the given value. + ifMetagenerationNotMatch: Makes the operation conditional on whether the + object's current metageneration does not match the given value. + object: Name of the object. 
+ """ + + bucket = messages.StringField(1, required=True) + generation = messages.IntegerField(2) + ifGenerationMatch = messages.IntegerField(3) + ifGenerationNotMatch = messages.IntegerField(4) + ifMetagenerationMatch = messages.IntegerField(5) + ifMetagenerationNotMatch = messages.IntegerField(6) + object = messages.StringField(7, required=True) + + +class StorageObjectsDeleteResponse(messages.Message): + """An empty StorageObjectsDelete response.""" + + +class StorageObjectsGetRequest(messages.Message): + """A StorageObjectsGetRequest object. + + Enums: + ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl. + + Fields: + bucket: Name of the bucket in which the object resides. + encryptionAlgorithm: Specifies the encryption algorithm that would be used + to decrypt the object. Only 'AES256' is supported currently. Algorithm, + key, and key hash must be supplied together. + encryptionKey: Provides a base64-encoded 256-bit key to decrypt the + object. Algorithm, key, and key hash must be supplied together. + encryptionKeyHash: Provides the digest of the key for error-checking + transmission. A digest is in the format of '='. Algorithm, key, and key + hash must be supplied together. + generation: If present, selects a specific revision of this object (as + opposed to the latest version, the default). + ifGenerationMatch: Makes the operation conditional on whether the object's + generation matches the given value. + ifGenerationNotMatch: Makes the operation conditional on whether the + object's generation does not match the given value. + ifMetagenerationMatch: Makes the operation conditional on whether the + object's current metageneration matches the given value. + ifMetagenerationNotMatch: Makes the operation conditional on whether the + object's current metageneration does not match the given value. + object: Name of the object. + projection: Set of properties to return. Defaults to noAcl. + """ + + class ProjectionValueValuesEnum(messages.Enum): + """Set of properties to return. Defaults to noAcl. + + Values: + full: Include all properties. + noAcl: Omit the acl property. + """ + full = 0 + noAcl = 1 + + bucket = messages.StringField(1, required=True) + encryptionAlgorithm = messages.StringField(2) + encryptionKey = messages.StringField(3) + encryptionKeyHash = messages.StringField(4) + generation = messages.IntegerField(5) + ifGenerationMatch = messages.IntegerField(6) + ifGenerationNotMatch = messages.IntegerField(7) + ifMetagenerationMatch = messages.IntegerField(8) + ifMetagenerationNotMatch = messages.IntegerField(9) + object = messages.StringField(10, required=True) + projection = messages.EnumField('ProjectionValueValuesEnum', 11) + + +class StorageObjectsInsertRequest(messages.Message): + """A StorageObjectsInsertRequest object. + + Enums: + PredefinedAclValueValuesEnum: Apply a predefined set of access controls to + this object. + ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl, + unless the object resource specifies the acl property, when it defaults + to full. + + Fields: + bucket: Name of the bucket in which to store the new object. Overrides the + provided object metadata's bucket value, if any. + contentEncoding: If set, sets the contentEncoding property of the final + object to this value. Setting this parameter is equivalent to setting + the contentEncoding metadata property. This can be useful when uploading + an object with uploadType=media to indicate the encoding of the content + being uploaded. 
+ encryptionAlgorithm: Specifies the encryption algorithm that would be used + to encrypt the object. Only 'AES256' is supported currently. Algorithm, + key, and key hash must be supplied together. + encryptionKey: Provides a base64-encoded 256-bit key to encrypt the + object. Algorithm, key, and key hash must be supplied together. + encryptionKeyHash: Provides the digest of the key for error-checking + transmission. A digest is in the format of '='. Algorithm, key, and key + hash must be supplied together. + ifGenerationMatch: Makes the operation conditional on whether the object's + current generation matches the given value. + ifGenerationNotMatch: Makes the operation conditional on whether the + object's current generation does not match the given value. + ifMetagenerationMatch: Makes the operation conditional on whether the + object's current metageneration matches the given value. + ifMetagenerationNotMatch: Makes the operation conditional on whether the + object's current metageneration does not match the given value. + name: Name of the object. Required when the object metadata is not + otherwise provided. Overrides the object metadata's name value, if any. + object: A Object resource to be passed as the request body. + predefinedAcl: Apply a predefined set of access controls to this object. + projection: Set of properties to return. Defaults to noAcl, unless the + object resource specifies the acl property, when it defaults to full. + """ + + class PredefinedAclValueValuesEnum(messages.Enum): + """Apply a predefined set of access controls to this object. + + Values: + authenticatedRead: Object owner gets OWNER access, and + allAuthenticatedUsers get READER access. + bucketOwnerFullControl: Object owner gets OWNER access, and project team + owners get OWNER access. + bucketOwnerRead: Object owner gets OWNER access, and project team owners + get READER access. + private: Object owner gets OWNER access. + projectPrivate: Object owner gets OWNER access, and project team members + get access according to their roles. + publicRead: Object owner gets OWNER access, and allUsers get READER + access. + """ + authenticatedRead = 0 + bucketOwnerFullControl = 1 + bucketOwnerRead = 2 + private = 3 + projectPrivate = 4 + publicRead = 5 + + class ProjectionValueValuesEnum(messages.Enum): + """Set of properties to return. Defaults to noAcl, unless the object + resource specifies the acl property, when it defaults to full. + + Values: + full: Include all properties. + noAcl: Omit the acl property. + """ + full = 0 + noAcl = 1 + + bucket = messages.StringField(1, required=True) + contentEncoding = messages.StringField(2) + encryptionAlgorithm = messages.StringField(3) + encryptionKey = messages.StringField(4) + encryptionKeyHash = messages.StringField(5) + ifGenerationMatch = messages.IntegerField(6) + ifGenerationNotMatch = messages.IntegerField(7) + ifMetagenerationMatch = messages.IntegerField(8) + ifMetagenerationNotMatch = messages.IntegerField(9) + name = messages.StringField(10) + object = messages.MessageField('Object', 11) + predefinedAcl = messages.EnumField('PredefinedAclValueValuesEnum', 12) + projection = messages.EnumField('ProjectionValueValuesEnum', 13) + + +class StorageObjectsListRequest(messages.Message): + """A StorageObjectsListRequest object. + + Enums: + ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl. + + Fields: + bucket: Name of the bucket in which to look for objects. + delimiter: Returns results in a directory-like mode. 
items will contain + only objects whose names, aside from the prefix, do not contain + delimiter. Objects whose names, aside from the prefix, contain delimiter + will have their name, truncated after the delimiter, returned in + prefixes. Duplicate prefixes are omitted. + maxResults: Maximum number of items plus prefixes to return. As duplicate + prefixes are omitted, fewer total results may be returned than + requested. + pageToken: A previously-returned page token representing part of the + larger set of results to view. + prefix: Filter results to objects whose names begin with this prefix. + projection: Set of properties to return. Defaults to noAcl. + versions: If true, lists all versions of a file as distinct results. + """ + + class ProjectionValueValuesEnum(messages.Enum): + """Set of properties to return. Defaults to noAcl. + + Values: + full: Include all properties. + noAcl: Omit the acl property. + """ + full = 0 + noAcl = 1 + + bucket = messages.StringField(1, required=True) + delimiter = messages.StringField(2) + maxResults = messages.IntegerField(3, variant=messages.Variant.UINT32) + pageToken = messages.StringField(4) + prefix = messages.StringField(5) + projection = messages.EnumField('ProjectionValueValuesEnum', 6) + versions = messages.BooleanField(7) + + +class StorageObjectsPatchRequest(messages.Message): + """A StorageObjectsPatchRequest object. + + Enums: + PredefinedAclValueValuesEnum: Apply a predefined set of access controls to + this object. + ProjectionValueValuesEnum: Set of properties to return. Defaults to full. + + Fields: + bucket: Name of the bucket in which the object resides. + encryptionAlgorithm: For downloading encrypted objects, specifies the + encryption algorithm that would be used to decrypt the object. Only + 'AES256' is supported currently. Algorithm, key, and key hash must be + supplied together. + encryptionKey: For downloading encrypted objects, provides a + base64-encoded 256-bit key to decrypt the object. Algorithm, key, and + key hash must be supplied together. + encryptionKeyHash: For downloading encrypted objects, provides the digest + of the key for error-checking transmission. A digest is in the format of + '='. Algorithm, key, and key hash must be supplied together. + generation: If present, selects a specific revision of this object (as + opposed to the latest version, the default). + ifGenerationMatch: Makes the operation conditional on whether the object's + current generation matches the given value. + ifGenerationNotMatch: Makes the operation conditional on whether the + object's current generation does not match the given value. + ifMetagenerationMatch: Makes the operation conditional on whether the + object's current metageneration matches the given value. + ifMetagenerationNotMatch: Makes the operation conditional on whether the + object's current metageneration does not match the given value. + object: Name of the object. + objectResource: A Object resource to be passed as the request body. + predefinedAcl: Apply a predefined set of access controls to this object. + projection: Set of properties to return. Defaults to full. + """ + + class PredefinedAclValueValuesEnum(messages.Enum): + """Apply a predefined set of access controls to this object. + + Values: + authenticatedRead: Object owner gets OWNER access, and + allAuthenticatedUsers get READER access. + bucketOwnerFullControl: Object owner gets OWNER access, and project team + owners get OWNER access. 
+ bucketOwnerRead: Object owner gets OWNER access, and project team owners + get READER access. + private: Object owner gets OWNER access. + projectPrivate: Object owner gets OWNER access, and project team members + get access according to their roles. + publicRead: Object owner gets OWNER access, and allUsers get READER + access. + """ + authenticatedRead = 0 + bucketOwnerFullControl = 1 + bucketOwnerRead = 2 + private = 3 + projectPrivate = 4 + publicRead = 5 + + class ProjectionValueValuesEnum(messages.Enum): + """Set of properties to return. Defaults to full. + + Values: + full: Include all properties. + noAcl: Omit the acl property. + """ + full = 0 + noAcl = 1 + + bucket = messages.StringField(1, required=True) + encryptionAlgorithm = messages.StringField(2) + encryptionKey = messages.StringField(3) + encryptionKeyHash = messages.StringField(4) + generation = messages.IntegerField(5) + ifGenerationMatch = messages.IntegerField(6) + ifGenerationNotMatch = messages.IntegerField(7) + ifMetagenerationMatch = messages.IntegerField(8) + ifMetagenerationNotMatch = messages.IntegerField(9) + object = messages.StringField(10, required=True) + objectResource = messages.MessageField('Object', 11) + predefinedAcl = messages.EnumField('PredefinedAclValueValuesEnum', 12) + projection = messages.EnumField('ProjectionValueValuesEnum', 13) + + +class StorageObjectsUpdateRequest(messages.Message): + """A StorageObjectsUpdateRequest object. + + Enums: + PredefinedAclValueValuesEnum: Apply a predefined set of access controls to + this object. + ProjectionValueValuesEnum: Set of properties to return. Defaults to full. + + Fields: + bucket: Name of the bucket in which the object resides. + encryptionAlgorithm: For downloading encrypted objects, specifies the + encryption algorithm that would be used to decrypt the object. Only + 'AES256' is supported currently. Algorithm, key, and key hash must be + supplied together. + encryptionKey: For downloading encrypted objects, provides a + base64-encoded 256-bit key to decrypt the object. Algorithm, key, and + key hash must be supplied together. + encryptionKeyHash: For downloading encrypted objects, provides the digest + of the key for error-checking transmission. A digest is in the format of + '='. Algorithm, key, and key hash must be supplied together. + generation: If present, selects a specific revision of this object (as + opposed to the latest version, the default). + ifGenerationMatch: Makes the operation conditional on whether the object's + current generation matches the given value. + ifGenerationNotMatch: Makes the operation conditional on whether the + object's current generation does not match the given value. + ifMetagenerationMatch: Makes the operation conditional on whether the + object's current metageneration matches the given value. + ifMetagenerationNotMatch: Makes the operation conditional on whether the + object's current metageneration does not match the given value. + object: Name of the object. + objectResource: A Object resource to be passed as the request body. + predefinedAcl: Apply a predefined set of access controls to this object. + projection: Set of properties to return. Defaults to full. + """ + + class PredefinedAclValueValuesEnum(messages.Enum): + """Apply a predefined set of access controls to this object. + + Values: + authenticatedRead: Object owner gets OWNER access, and + allAuthenticatedUsers get READER access. + bucketOwnerFullControl: Object owner gets OWNER access, and project team + owners get OWNER access. 
+ bucketOwnerRead: Object owner gets OWNER access, and project team owners + get READER access. + private: Object owner gets OWNER access. + projectPrivate: Object owner gets OWNER access, and project team members + get access according to their roles. + publicRead: Object owner gets OWNER access, and allUsers get READER + access. + """ + authenticatedRead = 0 + bucketOwnerFullControl = 1 + bucketOwnerRead = 2 + private = 3 + projectPrivate = 4 + publicRead = 5 + + class ProjectionValueValuesEnum(messages.Enum): + """Set of properties to return. Defaults to full. + + Values: + full: Include all properties. + noAcl: Omit the acl property. + """ + full = 0 + noAcl = 1 + + bucket = messages.StringField(1, required=True) + encryptionAlgorithm = messages.StringField(2) + encryptionKey = messages.StringField(3) + encryptionKeyHash = messages.StringField(4) + generation = messages.IntegerField(5) + ifGenerationMatch = messages.IntegerField(6) + ifGenerationNotMatch = messages.IntegerField(7) + ifMetagenerationMatch = messages.IntegerField(8) + ifMetagenerationNotMatch = messages.IntegerField(9) + object = messages.StringField(10, required=True) + objectResource = messages.MessageField('Object', 11) + predefinedAcl = messages.EnumField('PredefinedAclValueValuesEnum', 12) + projection = messages.EnumField('ProjectionValueValuesEnum', 13) + + +class StorageObjectsWatchAllRequest(messages.Message): + """A StorageObjectsWatchAllRequest object. + + Enums: + ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl. + + Fields: + bucket: Name of the bucket in which to look for objects. + channel: A Channel resource to be passed as the request body. + delimiter: Returns results in a directory-like mode. items will contain + only objects whose names, aside from the prefix, do not contain + delimiter. Objects whose names, aside from the prefix, contain delimiter + will have their name, truncated after the delimiter, returned in + prefixes. Duplicate prefixes are omitted. + maxResults: Maximum number of items plus prefixes to return. As duplicate + prefixes are omitted, fewer total results may be returned than + requested. + pageToken: A previously-returned page token representing part of the + larger set of results to view. + prefix: Filter results to objects whose names begin with this prefix. + projection: Set of properties to return. Defaults to noAcl. + versions: If true, lists all versions of a file as distinct results. + """ + + class ProjectionValueValuesEnum(messages.Enum): + """Set of properties to return. Defaults to noAcl. + + Values: + full: Include all properties. + noAcl: Omit the acl property. 
+ """ + full = 0 + noAcl = 1 + + bucket = messages.StringField(1, required=True) + channel = messages.MessageField('Channel', 2) + delimiter = messages.StringField(3) + maxResults = messages.IntegerField(4, variant=messages.Variant.UINT32) + pageToken = messages.StringField(5) + prefix = messages.StringField(6) + projection = messages.EnumField('ProjectionValueValuesEnum', 7) + versions = messages.BooleanField(8) + + diff --git a/gcloud/storage/blob.py b/gcloud/storage/blob.py index ff5aef4f9f4b..f76feb9d587a 100644 --- a/gcloud/storage/blob.py +++ b/gcloud/storage/blob.py @@ -25,8 +25,8 @@ import six from six.moves.urllib.parse import quote # pylint: disable=F0401 -from _gcloud_vendor.apitools.base.py import http_wrapper -from _gcloud_vendor.apitools.base.py import transfer +from apitools.base.py import http_wrapper +from apitools.base.py import transfer from gcloud.credentials import generate_signed_url from gcloud.exceptions import NotFound @@ -224,7 +224,7 @@ def download_to_file(self, file_obj): download_url = self.media_link # Use apitools 'Download' facility. - download = transfer.Download.FromStream(file_obj, auto_transfer=False) + download = transfer.Download.FromStream(file_obj, auto_transfer=True) download.chunksize = self.CHUNK_SIZE headers = {'Range': 'bytes=0-%d' % (self.CHUNK_SIZE - 1)} request = http_wrapper.Request(download_url, 'GET', headers) @@ -347,7 +347,7 @@ def upload_from_file(self, file_obj, rewind=False, size=None, # Should we be passing callbacks through from caller? We can't # pass them as None, because apitools wants to print to the console # by default. - if upload.strategy == transfer._RESUMABLE_UPLOAD: + if upload.strategy == transfer.RESUMABLE_UPLOAD: http_response = upload.StreamInChunks( callback=lambda *args: None, finish_callback=lambda *args: None) diff --git a/gcloud/storage/test_blob.py b/gcloud/storage/test_blob.py index 242477630780..f32643d80b35 100644 --- a/gcloud/storage/test_blob.py +++ b/gcloud/storage/test_blob.py @@ -254,6 +254,7 @@ def test_download_to_file(self): connection = _Connection( (chunk1_response, b'abc'), (chunk2_response, b'def'), + ({'status': OK}, b''), ) bucket = _Bucket(connection) MEDIA_LINK = 'http://example.com/media/' @@ -279,6 +280,7 @@ def test_download_to_filename(self): connection = _Connection( (chunk1_response, b'abc'), (chunk2_response, b'def'), + ({'status': OK}, b''), ) bucket = _Bucket(connection) MEDIA_LINK = 'http://example.com/media/' @@ -311,6 +313,7 @@ def test_download_as_string(self): connection = _Connection( (chunk1_response, b'abc'), (chunk2_response, b'def'), + ({'status': OK}, b''), ) bucket = _Bucket(connection) MEDIA_LINK = 'http://example.com/media/' @@ -386,8 +389,8 @@ def test_upload_from_file_resumable(self): from six.moves.urllib.parse import urlsplit from tempfile import NamedTemporaryFile from gcloud._testing import _Monkey - from _gcloud_vendor.apitools.base.py import http_wrapper - from _gcloud_vendor.apitools.base.py import transfer + from apitools.base.py import http_wrapper + from apitools.base.py import transfer BLOB_NAME = 'blob-name' UPLOAD_URL = 'http://example.com/upload/name/key' DATA = b'ABCDEF' @@ -445,7 +448,7 @@ def test_upload_from_file_w_slash_in_name(self): from six.moves.urllib.parse import parse_qsl from six.moves.urllib.parse import urlsplit from tempfile import NamedTemporaryFile - from _gcloud_vendor.apitools.base.py import http_wrapper + from apitools.base.py import http_wrapper BLOB_NAME = 'parent/child' UPLOAD_URL = 'http://example.com/upload/name/parent%2Fchild' 
DATA = b'ABCDEF' @@ -465,8 +468,12 @@ def test_upload_from_file_w_slash_in_name(self): fh.write(DATA) fh.flush() blob.upload_from_file(fh, rewind=True) + self.assertEqual(fh.tell(), len(DATA)) rq = connection.http._requested self.assertEqual(len(rq), 1) + self.assertEqual(rq[0]['redirections'], 5) + self.assertEqual(rq[0]['body'], DATA) + self.assertEqual(rq[0]['connection_type'], None) self.assertEqual(rq[0]['method'], 'POST') uri = rq[0]['uri'] scheme, netloc, path, qs, _ = urlsplit(uri) @@ -487,7 +494,7 @@ def _upload_from_filename_test_helper(self, properties=None, from six.moves.urllib.parse import parse_qsl from six.moves.urllib.parse import urlsplit from tempfile import NamedTemporaryFile - from _gcloud_vendor.apitools.base.py import http_wrapper + from apitools.base.py import http_wrapper BLOB_NAME = 'blob-name' UPLOAD_URL = 'http://example.com/upload/name/key' DATA = b'ABCDEF' @@ -551,7 +558,7 @@ def test_upload_from_string_w_bytes(self): from six.moves.http_client import OK from six.moves.urllib.parse import parse_qsl from six.moves.urllib.parse import urlsplit - from _gcloud_vendor.apitools.base.py import http_wrapper + from apitools.base.py import http_wrapper BLOB_NAME = 'blob-name' UPLOAD_URL = 'http://example.com/upload/name/key' DATA = b'ABCDEF' @@ -588,7 +595,7 @@ def test_upload_from_string_w_text(self): from six.moves.http_client import OK from six.moves.urllib.parse import parse_qsl from six.moves.urllib.parse import urlsplit - from _gcloud_vendor.apitools.base.py import http_wrapper + from apitools.base.py import http_wrapper BLOB_NAME = 'blob-name' UPLOAD_URL = 'http://example.com/upload/name/key' DATA = u'ABCDEF\u1234' @@ -1052,6 +1059,8 @@ def build_api_url(self, path, query_params=None, class _HTTP(_Responder): + connections = {} # For google-apitools debugging. + def request(self, uri, method, headers, body, **kw): return self._respond(uri=uri, method=method, headers=headers, body=body, **kw) diff --git a/run_pylint.py b/run_pylint.py index 107b25d649a0..efefcb1bb0fe 100644 --- a/run_pylint.py +++ b/run_pylint.py @@ -29,7 +29,6 @@ IGNORED_DIRECTORIES = [ - '_gcloud_vendor/', ] IGNORED_FILES = [ 'gcloud/datastore/_datastore_v1_pb2.py', diff --git a/setup.py b/setup.py index c66fb94a4103..c8a2d7ac0c8d 100644 --- a/setup.py +++ b/setup.py @@ -12,6 +12,7 @@ REQUIREMENTS = [ + 'google-apitools', 'httplib2', 'oauth2client >= 1.4.6', 'protobuf >= 2.5.0', diff --git a/tox.ini b/tox.ini index 03e669373a2f..e730fc496d7c 100644 --- a/tox.ini +++ b/tox.ini @@ -43,7 +43,7 @@ deps = Sphinx [pep8] -exclude = gcloud/datastore/_datastore_v1_pb2.py,docs/conf.py,*.egg/,.*/,_gcloud_vendor/ +exclude = gcloud/datastore/_datastore_v1_pb2.py,docs/conf.py,*.egg/,.*/ verbose = 1 [testenv:lint]
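
Illustrative note (not part of the patch): the hunks above replace the vendored ``_gcloud_vendor.apitools`` imports with the released ``google-apitools`` package, flip ``auto_transfer`` to ``True``, and switch to the now-public ``transfer.RESUMABLE_UPLOAD`` constant, with ``google-apitools`` added to ``setup.py``. The sketch below shows roughly how a chunked download is driven through the released package after this change; the ``MEDIA_URL`` value and the plain ``httplib2.Http()`` client are placeholder assumptions (real callers would use a blob's ``media_link`` and a credentialed connection), so treat it as a minimal sketch rather than the library's own code:

    import io

    import httplib2
    from apitools.base.py import http_wrapper
    from apitools.base.py import transfer

    MEDIA_URL = 'https://example.com/download/some-object'  # placeholder assumption
    CHUNK_SIZE = 1024 * 1024  # download in 1 MB ranges

    stream = io.BytesIO()

    # auto_transfer=True means the transfer runs as soon as the download is
    # initialized, which is why the tests above queue one extra
    # ({'status': OK}, b'') response per download.
    download = transfer.Download.FromStream(stream, auto_transfer=True)
    download.chunksize = CHUNK_SIZE

    headers = {'Range': 'bytes=0-%d' % (CHUNK_SIZE - 1,)}
    request = http_wrapper.Request(MEDIA_URL, 'GET', headers)

    # An unauthenticated httplib2 client is used here only to keep the sketch
    # self-contained; gcloud-python passes its connection's credentialed http.
    download.InitializeDownload(request, httplib2.Http())

    data = stream.getvalue()

On the upload side, the same package exposes the resumable-upload marker publicly, so strategy checks compare against ``transfer.RESUMABLE_UPLOAD`` instead of the former private ``transfer._RESUMABLE_UPLOAD``.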