diff --git a/system_tests/attempt_system_tests.py b/system_tests/attempt_system_tests.py
index c450f8da61d7..5a0d3a787a7d 100644
--- a/system_tests/attempt_system_tests.py
+++ b/system_tests/attempt_system_tests.py
@@ -26,17 +26,18 @@
 import subprocess
 import sys

+from run_system_test import FailedSystemTestModule
 from run_system_test import run_module_tests

-MODULES = (
-    'bigquery',
+MODULES = (  # ordered from most to least stable
     'datastore',
-    'logging',
-    'monitoring',
-    'pubsub',
     'storage',
+    'bigquery',
+    'pubsub',
+    'logging',
     'translate',
+    'monitoring',
 )

 if sys.version_info[:2] == (2, 7):
     MODULES += ('bigtable', 'bigtable-happybase')
@@ -111,9 +112,14 @@ def prepare_to_run():
 def main():
     """Run all the system tests if necessary."""
     prepare_to_run()
+    failed_modules = 0
     for module in MODULES:
-        run_module_tests(module)
+        try:
+            run_module_tests(module)
+        except FailedSystemTestModule:
+            failed_modules += 1
+    sys.exit(failed_modules)


 if __name__ == '__main__':
     main()
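With this change the script's exit status doubles as a failure count: 0 still means every module passed, and any CI wrapper that only tests for non-zero keeps working unchanged. A minimal sketch of how a caller might consume that status (assuming it runs from the system_tests directory; note that POSIX truncates exit statuses to one byte, which is harmless for a module list this small):

    import subprocess
    import sys

    # Returns 0 on success, otherwise the number of modules whose
    # test suite raised FailedSystemTestModule.
    failed = subprocess.call([sys.executable, 'attempt_system_tests.py'])
    print('%d module(s) failed' % (failed,))
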
diff --git a/system_tests/bigquery.py b/system_tests/bigquery.py
index 6f3a005a478d..f7ff5706683c 100644
--- a/system_tests/bigquery.py
+++ b/system_tests/bigquery.py
@@ -13,7 +13,6 @@
 # limitations under the License.

 import operator
-import time
 import unittest

@@ -22,7 +21,9 @@
 from gcloud import bigquery
 from gcloud.exceptions import Forbidden

-from retry import Retry
+from retry import RetryErrors
+from retry import RetryInstanceState
+from retry import RetryResult
 from system_test_utils import unique_resource_id

@@ -86,7 +87,13 @@ def test_patch_dataset(self):
     def test_update_dataset(self):
         dataset = Config.CLIENT.dataset(DATASET_NAME)
         self.assertFalse(dataset.exists())
-        dataset.create()
+
+        # We need to wait to stay within the rate limits.
+        # The alternative outcome is a 403 Forbidden response from upstream.
+        # See: https://cloud.google.com/bigquery/quota-policy
+        retry = RetryErrors(Forbidden, max_tries=2, delay=30)
+        retry(dataset.create)()
+
         self.to_delete.append(dataset)
         self.assertTrue(dataset.exists())
         after = [grant for grant in dataset.access_grants
@@ -96,11 +103,8 @@
         # We need to wait to stay within the rate limits.
         # The alternative outcome is a 403 Forbidden response from upstream.
         # See: https://cloud.google.com/bigquery/quota-policy
-        @Retry(Forbidden, tries=2, delay=30)
-        def update_dataset():
-            dataset.update()
+        retry(dataset.update)()

-        update_dataset()
         self.assertEqual(len(dataset.access_grants), len(after))
         for found, expected in zip(dataset.access_grants, after):
             self.assertEqual(found.role, expected.role)
@@ -202,11 +206,9 @@ def test_update_table(self):
         # We need to wait to stay within the rate limits.
         # The alternative outcome is a 403 Forbidden response from upstream.
         # See: https://cloud.google.com/bigquery/quota-policy
-        @Retry(Forbidden, tries=2, delay=30)
-        def create_dataset():
-            dataset.create()
+        retry = RetryErrors(Forbidden, max_tries=2, delay=30)
+        retry(dataset.create)()

-        create_dataset()
         self.to_delete.append(dataset)
         TABLE_NAME = 'test_table'
         full_name = bigquery.SchemaField('full_name', 'STRING',
@@ -261,15 +263,15 @@ def test_load_table_then_dump_table(self):
         self.assertEqual(len(errors), 0)

         rows = ()
-        counter = 9
+
+        def _has_rows(result):
+            return len(result[0]) > 0
+
+        # Allow for 90 seconds of "warm up" before rows visible.  See:
+        # https://cloud.google.com/bigquery/streaming-data-into-bigquery#dataavailability
-
-        while len(rows) == 0 and counter > 0:
-            counter -= 1
-            rows, _, _ = table.fetch_data()
-            if len(rows) == 0:
-                time.sleep(10)
+        # 8 tries -> 1 + 2 + 4 + 8 + 16 + 32 + 64 = 127 seconds
+        retry = RetryResult(_has_rows, max_tries=8)
+        rows, _, _ = retry(table.fetch_data)()

         by_age = operator.itemgetter(1)
         self.assertEqual(sorted(rows, key=by_age),
@@ -329,13 +331,13 @@ def test_load_table_from_storage_then_dump_table(self):

         job.begin()

-        counter = 9  # Allow for 90 seconds of lag.
+        def _job_done(instance):
+            return instance.state in ('DONE', 'done')

-        while job.state not in ('DONE', 'done') and counter > 0:
-            counter -= 1
-            job.reload()
-            if job.state not in ('DONE', 'done'):
-                time.sleep(10)
+        # Allow for 90 seconds of lag before the load job completes.
+        # 8 tries -> 1 + 2 + 4 + 8 + 16 + 32 + 64 = 127 seconds
+        retry = RetryInstanceState(_job_done, max_tries=8)
+        retry(job.reload)()

         self.assertTrue(job.state in ('DONE', 'done'))
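The call shape `retry(dataset.create)()` is worth unpacking: `RetryErrors(...)` builds a decorator, applying it to the bound method returns a wrapped callable, and the trailing `()` invokes that callable. The inline form is equivalent to the `@Retry`-decorated throwaway functions it replaces, without the local `def`. A self-contained sketch of the same shape (the `Forbidden` class and `flaky_create` function below are stand-ins; only `retry.py` is assumed importable):

    from retry import RetryErrors


    class Forbidden(Exception):
        """Stand-in for gcloud.exceptions.Forbidden."""


    _calls = []


    def flaky_create():
        """Hypothetical helper: raises once, then succeeds."""
        _calls.append(1)
        if len(_calls) < 2:
            raise Forbidden('rate limit exceeded')


    # Same shape as above: decorate the callable, then invoke the wrapper.
    RetryErrors(Forbidden, max_tries=2, delay=1)(flaky_create)()
    assert len(_calls) == 2
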
diff --git a/system_tests/bigtable.py b/system_tests/bigtable.py
index c78faa7fb4a8..943625610b0b 100644
--- a/system_tests/bigtable.py
+++ b/system_tests/bigtable.py
@@ -14,7 +14,6 @@

 import datetime
 import operator
-import time
 import unittest

@@ -32,6 +31,8 @@
 from gcloud.bigtable.row_data import PartialRowData
 from gcloud.environment_vars import TESTS_PROJECT

+from retry import RetryErrors
+from retry import RetryResult
 from system_test_utils import unique_resource_id

@@ -75,39 +76,28 @@ def _operation_wait(operation, max_attempts=5):
     :rtype: bool
     :returns: Boolean indicating if the operation finished.
     """
-    total_sleep = 0
-    while not operation.finished():
-        if total_sleep > max_attempts:
-            return False
-        time.sleep(1)
-        total_sleep += 1
-    return True
+    def _operation_finished(result):
+        return result

+    retry = RetryResult(_operation_finished, max_tries=max_attempts)
+    return retry(operation.finished)()

-def _retry_backoff(meth, *args, **kw):
+
+def _retry_on_unavailable(exc):
+    """Retry only AbortionErrors whose status code is 'UNAVAILABLE'."""
     from grpc.beta.interfaces import StatusCode
-    from grpc.framework.interfaces.face.face import AbortionError
-    backoff_intervals = [1, 2, 4, 8]
-    while True:
-        try:
-            return meth(*args, **kw)
-        except AbortionError as error:
-            if error.code != StatusCode.UNAVAILABLE:
-                raise
-            if backoff_intervals:
-                time.sleep(backoff_intervals.pop(0))
-            else:
-                raise
+    return exc.code == StatusCode.UNAVAILABLE


 def setUpModule():
+    from grpc.framework.interfaces.face.face import AbortionError
     _helpers.PROJECT = TESTS_PROJECT
     Config.CLIENT = Client(admin=True)
     Config.INSTANCE = Config.CLIENT.instance(INSTANCE_ID, LOCATION_ID)
     Config.CLIENT.start()
-    instances, failed_locations = _retry_backoff(
-        Config.CLIENT.list_instances)
+    retry = RetryErrors(AbortionError, error_predicate=_retry_on_unavailable)
+    instances, failed_locations = retry(Config.CLIENT.list_instances)()
     if len(failed_locations) != 0:
         raise ValueError('List instances failed in module set up.')
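Here `error_predicate` narrows the retry to transient failures: a caught `AbortionError` is retried only while the predicate returns True, and any other status code re-raises immediately instead of burning the backoff budget. A toy illustration of that filtering (the exception class and codes below are stand-ins for grpc's `AbortionError` and `StatusCode`):

    from retry import RetryErrors


    class ToyAbortionError(Exception):
        """Stand-in for grpc's AbortionError, carrying a status code."""
        def __init__(self, code):
            super(ToyAbortionError, self).__init__(code)
            self.code = code


    def _retry_on_unavailable(exc):
        return exc.code == 'UNAVAILABLE'


    def always_denied():
        raise ToyAbortionError('PERMISSION_DENIED')


    retry = RetryErrors(ToyAbortionError, error_predicate=_retry_on_unavailable)
    try:
        retry(always_denied)()
    except ToyAbortionError:
        pass  # re-raised on the first call: the predicate rejected the code
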
diff --git a/system_tests/logging_.py b/system_tests/logging_.py
index a7a910e95a79..6f95d01c0b64 100644
--- a/system_tests/logging_.py
+++ b/system_tests/logging_.py
@@ -12,14 +12,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import time
-
 import unittest

 from gcloud import _helpers
 from gcloud.environment_vars import TESTS_PROJECT
 from gcloud import logging
+from retry import RetryErrors
+from retry import RetryResult
 from system_test_utils import unique_resource_id

@@ -33,27 +33,10 @@
 TOPIC_NAME = 'gcloud-python-system-testing%s' % (_RESOURCE_ID,)

-def _retry_backoff(result_predicate, meth, *args, **kw):
+def _retry_on_unavailable(exc):
+    """Retry only AbortionErrors whose status code is 'UNAVAILABLE'."""
     from grpc.beta.interfaces import StatusCode
-    from grpc.framework.interfaces.face.face import AbortionError
-    backoff_intervals = [1, 2, 4, 8]
-    while True:
-        try:
-            result = meth(*args, **kw)
-        except AbortionError as error:
-            if error.code != StatusCode.UNAVAILABLE:
-                raise
-            if backoff_intervals:
-                time.sleep(backoff_intervals.pop(0))
-                continue
-            else:
-                raise
-        if result_predicate(result):
-            return result
-        if backoff_intervals:
-            time.sleep(backoff_intervals.pop(0))
-        else:
-            raise RuntimeError('%s: %s %s' % (meth, args, kw))
+    return exc.code == StatusCode.UNAVAILABLE


 def _has_entries(result):
@@ -81,28 +64,26 @@ def setUp(self):

     def tearDown(self):
         from gcloud.exceptions import NotFound
+        retry = RetryErrors(NotFound)
         for doomed in self.to_delete:
-            backoff_intervals = [1, 2, 4, 8]
-            while True:
-                try:
-                    doomed.delete()
-                    break
-                except NotFound:
-                    if backoff_intervals:
-                        time.sleep(backoff_intervals.pop(0))
-                    else:
-                        raise
+            retry(doomed.delete)()

     @staticmethod
     def _logger_name():
         return 'system-tests-logger' + unique_resource_id('-')

+    def _list_entries(self, logger):
+        from grpc.framework.interfaces.face.face import AbortionError
+        inner = RetryResult(_has_entries)(logger.list_entries)
+        outer = RetryErrors(AbortionError, _retry_on_unavailable)(inner)
+        return outer()
+
     def test_log_text(self):
         TEXT_PAYLOAD = 'System test: test_log_text'
         logger = Config.CLIENT.logger(self._logger_name())
         self.to_delete.append(logger)
         logger.log_text(TEXT_PAYLOAD)
-        entries, _ = _retry_backoff(_has_entries, logger.list_entries)
+        entries, _ = self._list_entries(logger)
         self.assertEqual(len(entries), 1)
         self.assertEqual(entries[0].payload, TEXT_PAYLOAD)

@@ -123,7 +104,7 @@ def test_log_text_w_metadata(self):

         logger.log_text(TEXT_PAYLOAD, insert_id=INSERT_ID, severity=SEVERITY,
                         http_request=REQUEST)
-        entries, _ = _retry_backoff(_has_entries, logger.list_entries)
+        entries, _ = self._list_entries(logger)

         self.assertEqual(len(entries), 1)

@@ -146,7 +127,7 @@ def test_log_struct(self):
         self.to_delete.append(logger)

         logger.log_struct(JSON_PAYLOAD)
-        entries, _ = _retry_backoff(_has_entries, logger.list_entries)
+        entries, _ = self._list_entries(logger)

         self.assertEqual(len(entries), 1)
         self.assertEqual(entries[0].payload, JSON_PAYLOAD)

@@ -171,7 +152,7 @@ def test_log_struct_w_metadata(self):
         logger.log_struct(JSON_PAYLOAD, insert_id=INSERT_ID, severity=SEVERITY,
                           http_request=REQUEST)
-        entries, _ = _retry_backoff(_has_entries, logger.list_entries)
+        entries, _ = self._list_entries(logger)

         self.assertEqual(len(entries), 1)
         self.assertEqual(entries[0].payload, JSON_PAYLOAD)

@@ -205,10 +186,12 @@ def test_list_metrics(self):
                          set([DEFAULT_METRIC_NAME]))

     def test_reload_metric(self):
+        from gcloud.exceptions import Conflict
+        retry = RetryErrors(Conflict)
         metric = Config.CLIENT.metric(
             DEFAULT_METRIC_NAME, DEFAULT_FILTER, DEFAULT_DESCRIPTION)
         self.assertFalse(metric.exists())
-        metric.create()
+        retry(metric.create)()
         self.to_delete.append(metric)
         metric.filter_ = 'logName:other'
         metric.description = 'local changes'
@@ -217,12 +200,14 @@ def test_reload_metric(self):
         self.assertEqual(metric.description, DEFAULT_DESCRIPTION)

     def test_update_metric(self):
+        from gcloud.exceptions import Conflict
+        retry = RetryErrors(Conflict)
         NEW_FILTER = 'logName:other'
         NEW_DESCRIPTION = 'updated'
         metric = Config.CLIENT.metric(
             DEFAULT_METRIC_NAME, DEFAULT_FILTER, DEFAULT_DESCRIPTION)
         self.assertFalse(metric.exists())
-        metric.create()
+        retry(metric.create)()
         self.to_delete.append(metric)
         metric.filter_ = NEW_FILTER
         metric.description = NEW_DESCRIPTION
@@ -324,10 +309,12 @@ def test_list_sinks(self):
                          set([DEFAULT_SINK_NAME]))

     def test_reload_sink(self):
+        from gcloud.exceptions import Conflict
+        retry = RetryErrors(Conflict)
         uri = self._init_bigquery_dataset()
         sink = Config.CLIENT.sink(DEFAULT_SINK_NAME, DEFAULT_FILTER, uri)
         self.assertFalse(sink.exists())
-        sink.create()
+        retry(sink.create)()
         self.to_delete.append(sink)
         sink.filter_ = 'BOGUS FILTER'
         sink.destination = 'BOGUS DESTINATION'
@@ -336,13 +323,15 @@ def test_reload_sink(self):
         self.assertEqual(sink.destination, uri)

     def test_update_sink(self):
+        from gcloud.exceptions import Conflict
+        retry = RetryErrors(Conflict)
         bucket_uri = self._init_storage_bucket()
         dataset_uri = self._init_bigquery_dataset()
         UPDATED_FILTER = 'logName:syslog'
         sink = Config.CLIENT.sink(
             DEFAULT_SINK_NAME, DEFAULT_FILTER, bucket_uri)
         self.assertFalse(sink.exists())
-        sink.create()
+        retry(sink.create)()
         self.to_delete.append(sink)
         sink.filter_ = UPDATED_FILTER
         sink.destination = dataset_uri
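`_list_entries` composes the two decorators: the inner `RetryResult` polls `list_entries` until `_has_entries` is satisfied, and the outer `RetryErrors` restarts that whole polling loop whenever an UNAVAILABLE error escapes it. The ordering matters because exceptions raised by the polled call pass straight through `RetryResult`, which only inspects return values. The same composition in miniature (toy `list_entries`; only `retry.py` is assumed):

    from retry import RetryErrors
    from retry import RetryResult

    _state = {'calls': 0}


    def list_entries():
        """Hypothetical stand-in: errors once, comes back empty once, then fills."""
        _state['calls'] += 1
        if _state['calls'] == 1:
            raise IOError('transient')
        return ['entry'] if _state['calls'] > 2 else []


    def _has_entries(result):
        return len(result) > 0


    inner = RetryResult(_has_entries)(list_entries)
    outer = RetryErrors(IOError)(inner)
    assert outer() == ['entry']
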
diff --git a/system_tests/pubsub.py b/system_tests/pubsub.py
index d651b2a7b33a..2f3d3295373c 100644
--- a/system_tests/pubsub.py
+++ b/system_tests/pubsub.py
@@ -13,7 +13,6 @@
 # limitations under the License.

 import os
-import time
 import unittest

 import httplib2

@@ -23,6 +22,8 @@
 from gcloud.environment_vars import TESTS_PROJECT
 from gcloud import pubsub

+from retry import RetryInstanceState
+from retry import RetryResult
 from system_test_utils import EmulatorCreds
 from system_test_utils import unique_resource_id

@@ -69,6 +70,7 @@ def test_create_topic(self):
         self.assertEqual(topic.name, topic_name)

     def test_list_topics(self):
+        before, _ = Config.CLIENT.list_topics()
         topics_to_create = [
             'new' + unique_resource_id(),
             'newer' + unique_resource_id(),
@@ -80,8 +82,13 @@
             self.to_delete.append(topic)

         # Retrieve the topics.
-        all_topics, _ = Config.CLIENT.list_topics()
-        created = [topic for topic in all_topics
+        def _all_created(result):
+            return len(result[0]) == len(before) + len(topics_to_create)
+
+        retry = RetryResult(_all_created)
+        after, _ = retry(Config.CLIENT.list_topics)()
+
+        created = [topic for topic in after
                    if topic.name in topics_to_create and
                    topic.project == Config.CLIENT.project]
         self.assertEqual(len(created), len(topics_to_create))
@@ -133,7 +140,12 @@ def test_list_subscriptions(self):
             self.to_delete.append(subscription)

         # Retrieve the subscriptions.
-        all_subscriptions, _ = topic.list_subscriptions()
+        def _all_created(result):
+            return len(result[0]) == len(subscriptions_to_create)
+
+        retry = RetryResult(_all_created)
+        all_subscriptions, _ = retry(topic.list_subscriptions)()
+
         created = [subscription for subscription in all_subscriptions
                    if subscription.name in subscriptions_to_create]
         self.assertEqual(len(created), len(subscriptions_to_create))
@@ -185,12 +197,14 @@ def test_topic_iam_policy(self):
         topic_name = 'test-topic-iam-policy-topic' + unique_resource_id('-')
         topic = Config.CLIENT.topic(topic_name)
         topic.create()
-        count = 5
-        while count > 0 and not topic.exists():
-            time.sleep(1)
-            count -= 1
+
+        # Retry / backoff up to 7 seconds (1 + 2 + 4)
+        retry = RetryResult(lambda result: result, max_tries=4)
+        retry(topic.exists)()
+
         self.assertTrue(topic.exists())
         self.to_delete.append(topic)
+
         if topic.check_iam_permissions([PUBSUB_TOPICS_GET_IAM_POLICY]):
             policy = topic.get_iam_policy()
             policy.viewers.add(policy.user('jjg@google.com'))
@@ -203,21 +217,24 @@ def test_subscription_iam_policy(self):
         topic_name = 'test-sub-iam-policy-topic' + unique_resource_id('-')
         topic = Config.CLIENT.topic(topic_name)
         topic.create()
-        count = 5
-        while count > 0 and not topic.exists():
-            time.sleep(1)
-            count -= 1
+
+        # Retry / backoff up to 7 seconds (1 + 2 + 4)
+        retry = RetryResult(lambda result: result, max_tries=4)
+        retry(topic.exists)()
+
         self.assertTrue(topic.exists())
         self.to_delete.append(topic)
         SUB_NAME = 'test-sub-iam-policy-sub' + unique_resource_id('-')
         subscription = topic.subscription(SUB_NAME)
         subscription.create()
-        count = 5
-        while count > 0 and not subscription.exists():
-            time.sleep(1)
-            count -= 1
+
+        # Retry / backoff up to 7 seconds (1 + 2 + 4)
+        retry = RetryResult(lambda result: result, max_tries=4)
+        retry(subscription.exists)()
+
         self.assertTrue(subscription.exists())
         self.to_delete.insert(0, subscription)
+
         if subscription.check_iam_permissions(
                 [PUBSUB_SUBSCRIPTIONS_GET_IAM_POLICY]):
             policy = subscription.get_iam_policy()
@@ -246,5 +263,12 @@ def test_fetch_delete_subscription_w_deleted_topic(self):
                    if subscription.name == ORPHANED]
         self.assertEqual(len(created), 1)
         orphaned = created[0]
+
+        def _no_topic(instance):
+            return instance.topic is None
+
+        retry = RetryInstanceState(_no_topic, max_tries=6)
+        retry(orphaned.reload)()
+
         self.assertTrue(orphaned.topic is None)
         orphaned.delete()
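`RetryInstanceState` covers the case where the polled method mutates its object rather than returning the interesting value: `reload()` returns nothing, so the predicate inspects the bound instance (`to_wrap.__self__`) after each call. A toy version of the orphaned-subscription wait above (stand-in class; only `retry.py` is assumed):

    from retry import RetryInstanceState


    class ToySubscription(object):
        """Stand-in resource whose reload() eventually clears `topic`."""
        def __init__(self):
            self.topic = 'doomed-topic'
            self._reloads = 0

        def reload(self):
            self._reloads += 1
            if self._reloads >= 2:
                self.topic = None  # backend finally reports the orphan


    def _no_topic(instance):
        return instance.topic is None


    sub = ToySubscription()
    RetryInstanceState(_no_topic, max_tries=6)(sub.reload)()
    assert sub.topic is None
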
diff --git a/system_tests/retry.py b/system_tests/retry.py
index 8e1f01a720fe..34e2bf93e015 100644
--- a/system_tests/retry.py
+++ b/system_tests/retry.py
@@ -3,53 +3,187 @@

 import six

+MAX_TRIES = 4
+DELAY = 1
+BACKOFF = 2

-class Retry(object):
-    """Retry class for retrying eventually consistent resources in testing."""
-    def __init__(self, exception, tries=4, delay=3, backoff=2, logger=None):
-        """Retry calling the decorated function using an exponential backoff.
+def _retry_all(_):
+    """Retry all caught exceptions."""
+    return True

-        :type exception: Exception or tuple of Exceptions
-        :param exception: The exception to check or may be a tuple of
-                          exceptions to check.

-        :type tries: int
-        :param tries: Number of times to try (not retry) before giving up.
+class RetryBase(object):
+    """Base for retrying calling a decorated function w/ exponential backoff.

+    :type max_tries: int
+    :param max_tries: Number of times to try (not retry) before giving up.

-        :type delay: int
-        :param delay: Initial delay between retries in seconds.
+    :type delay: int
+    :param delay: Initial delay between retries in seconds.

-        :type backoff: int
-        :param backoff: Backoff multiplier e.g. value of 2 will double the
-                        delay each retry.
+    :type backoff: int
+    :param backoff: Backoff multiplier e.g. value of 2 will double the
+                    delay each retry.

-        :type logger: logging.Logger instance
-        :param logger: Logger to use. If None, print.
-        """
+    :type logger: logging.Logger instance
+    :param logger: Logger to use. If None, print.
+    """
+    def __init__(self, max_tries=MAX_TRIES, delay=DELAY, backoff=BACKOFF,
+                 logger=None):
+        self.max_tries = max_tries
-        self.exception = exception
-        self.tries = tries
         self.delay = delay
         self.backoff = backoff
         self.logger = logger.warning if logger else six.print_


+class RetryErrors(RetryBase):
+    """Decorator for retrying given exceptions in testing.
+
+    :type exception: Exception or tuple of Exceptions
+    :param exception: The exception to check or may be a tuple of
+                      exceptions to check.
+
+    :type error_predicate: function, takes caught exception, returns bool
+    :param error_predicate: Predicate evaluating whether to retry after a
+                            caught exception.
+
+    :type max_tries: int
+    :param max_tries: Number of times to try (not retry) before giving up.
+
+    :type delay: int
+    :param delay: Initial delay between retries in seconds.
+
+    :type backoff: int
+    :param backoff: Backoff multiplier e.g. value of 2 will double the
+                    delay each retry.
+
+    :type logger: logging.Logger instance
+    :param logger: Logger to use. If None, print.
+    """
+    def __init__(self, exception, error_predicate=_retry_all,
+                 max_tries=MAX_TRIES, delay=DELAY, backoff=BACKOFF,
+                 logger=None):
+        super(RetryErrors, self).__init__(max_tries, delay, backoff, logger)
+        self.exception = exception
+        self.error_predicate = error_predicate
+
     def __call__(self, to_wrap):
         @wraps(to_wrap)
         def wrapped_function(*args, **kwargs):
-            tries_counter = self.tries
-            delay = self.delay
-            while tries_counter > 0:
+            tries = 0
+            # Sleep between tries; the final try happens after the loop.
+            while tries < self.max_tries - 1:
                 try:
                     return to_wrap(*args, **kwargs)
                 except self.exception as caught_exception:
+
+                    if not self.error_predicate(caught_exception):
+                        raise
+
+                    delay = self.delay * self.backoff**tries
                     msg = ("%s, Trying again in %d seconds..." %
-                           (str(caught_exception), delay))
+                           (caught_exception, delay))
                     self.logger(msg)
+
                     time.sleep(delay)
-                    tries_counter -= 1
-                    delay *= self.backoff
+                    tries += 1
             return to_wrap(*args, **kwargs)

         return wrapped_function
+
+
+class RetryResult(RetryBase):
+    """Decorator for retrying based on non-error result.
+
+    :type result_predicate: function, takes result, returns bool
+    :param result_predicate: Predicate evaluating whether to retry after a
+                             result is returned.
+
+    :type max_tries: int
+    :param max_tries: Number of times to try (not retry) before giving up.
+
+    :type delay: int
+    :param delay: Initial delay between retries in seconds.
+
+    :type backoff: int
+    :param backoff: Backoff multiplier e.g. value of 2 will double the
+                    delay each retry.
+
+    :type logger: logging.Logger instance
+    :param logger: Logger to use. If None, print.
+    """
+    def __init__(self, result_predicate,
+                 max_tries=MAX_TRIES, delay=DELAY, backoff=BACKOFF,
+                 logger=None):
+        super(RetryResult, self).__init__(max_tries, delay, backoff, logger)
+        self.result_predicate = result_predicate
+
+    def __call__(self, to_wrap):
+        @wraps(to_wrap)
+        def wrapped_function(*args, **kwargs):
+            tries = 0
+            # Sleep between tries; the final try happens after the loop.
+            while tries < self.max_tries - 1:
+                result = to_wrap(*args, **kwargs)
+                if self.result_predicate(result):
+                    return result
+
+                delay = self.delay * self.backoff**tries
+                msg = "%s. Trying again in %d seconds..." % (
+                    self.result_predicate.__name__, delay,)
+                self.logger(msg)
+
+                time.sleep(delay)
+                tries += 1
+            return to_wrap(*args, **kwargs)
+
+        return wrapped_function
+
+
+class RetryInstanceState(RetryBase):
+    """Decorator for retrying based on instance state.
+
+    :type instance_predicate: function, takes instance, returns bool
+    :param instance_predicate: Predicate evaluating whether to retry after an
+                               API-invoking method is called.
+
+    :type max_tries: int
+    :param max_tries: Number of times to try (not retry) before giving up.
+
+    :type delay: int
+    :param delay: Initial delay between retries in seconds.
+
+    :type backoff: int
+    :param backoff: Backoff multiplier e.g. value of 2 will double the
+                    delay each retry.
+
+    :type logger: logging.Logger instance
+    :param logger: Logger to use. If None, print.
+    """
+    def __init__(self, instance_predicate,
+                 max_tries=MAX_TRIES, delay=DELAY, backoff=BACKOFF,
+                 logger=None):
+        super(RetryInstanceState, self).__init__(
+            max_tries, delay, backoff, logger)
+        self.instance_predicate = instance_predicate
+
+    def __call__(self, to_wrap):
+        instance = to_wrap.__self__  # only instance methods allowed
+
+        @wraps(to_wrap)
+        def wrapped_function(*args, **kwargs):
+            tries = 0
+            # Sleep between tries; the final try happens after the loop.
+            while tries < self.max_tries - 1:
+                result = to_wrap(*args, **kwargs)
+                if self.instance_predicate(instance):
+                    return result
+
+                delay = self.delay * self.backoff**tries
+                msg = "%s. Trying again in %d seconds..." % (
+                    self.instance_predicate.__name__, delay,)
+                self.logger(msg)
+
+                time.sleep(delay)
+                tries += 1
             return to_wrap(*args, **kwargs)

         return wrapped_function
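All three decorators share one schedule: zero-based try `t` is followed by a sleep of `delay * backoff**t`, and the final try is made with no sleep after it, so the worst-case time spent sleeping is a geometric sum over `max_tries - 1` terms. A standalone check of the budgets quoted in the comments elsewhere in this patch:

    MAX_TRIES = 4
    DELAY = 1
    BACKOFF = 2


    def total_sleep(max_tries=MAX_TRIES, delay=DELAY, backoff=BACKOFF):
        """Worst-case seconds spent sleeping before the final try."""
        return sum(delay * backoff ** t for t in range(max_tries - 1))


    assert total_sleep() == 7               # pubsub.py: 'up to 7 seconds (1 + 2 + 4)'
    assert total_sleep(max_tries=8) == 127  # bigquery.py: '127 seconds'
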
diff --git a/system_tests/run_system_test.py b/system_tests/run_system_test.py
index e17b10f2f12f..81f69f3816a3 100644
--- a/system_tests/run_system_test.py
+++ b/system_tests/run_system_test.py
@@ -41,6 +41,10 @@
 }


+class FailedSystemTestModule(Exception):
+    pass
+
+
 def get_parser():
     parser = argparse.ArgumentParser(
         description='GCloud test runner against actual project.')
@@ -68,14 +72,17 @@ def run_module_tests(module_name, ignore_requirements=False):
     test_result = unittest.TextTestRunner(verbosity=2).run(suite)
     # Exit if not successful.
     if not test_result.wasSuccessful():
-        sys.exit(1)
+        raise FailedSystemTestModule(module_name)


 def main():
     parser = get_parser()
     args = parser.parse_args()
-    run_module_tests(args.package,
-                     ignore_requirements=args.ignore_requirements)
+    try:
+        run_module_tests(args.package,
+                         ignore_requirements=args.ignore_requirements)
+    except FailedSystemTestModule:
+        sys.exit(1)


 if __name__ == '__main__':
diff --git a/system_tests/storage.py b/system_tests/storage.py
index ef55de21db63..2b46b7d11ded 100644
--- a/system_tests/storage.py
+++ b/system_tests/storage.py
@@ -27,6 +27,7 @@
 from gcloud.storage._helpers import _base64_md5hash

 from system_test_utils import unique_resource_id
+from retry import RetryErrors

 HTTP = httplib2.Http()

@@ -52,7 +53,8 @@ def setUpModule():


 def tearDownModule():
-    Config.TEST_BUCKET.delete(force=True)
+    retry = RetryErrors(exceptions.Conflict)
+    retry(Config.TEST_BUCKET.delete)(force=True)


 class TestStorageBuckets(unittest.TestCase):
@@ -253,7 +255,10 @@ def setUpClass(cls):
         super(TestStoragePseudoHierarchy, cls).setUpClass()
         # Make sure bucket empty before beginning.
         for blob in cls.bucket.list_blobs():
-            blob.delete()
+            try:
+                blob.delete()
+            except exceptions.NotFound:  # eventual consistency
+                pass

         simple_path = cls.FILES['simple']['path']
         blob = storage.Blob(cls.FILENAMES[0], bucket=cls.bucket)
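One detail that is easy to miss in `retry(Config.TEST_BUCKET.delete)(force=True)`: the wrapper produced by these decorators forwards `*args`/`**kwargs`, so the same arguments are re-passed on every attempt. A minimal demonstration (stand-in `Conflict` and `delete`; only `retry.py` is assumed):

    from retry import RetryErrors


    class Conflict(Exception):
        """Stand-in for gcloud.exceptions.Conflict."""


    _attempts = []


    def delete(force=False):
        """Hypothetical delete: conflicts once, then succeeds."""
        _attempts.append(force)
        if len(_attempts) < 2:
            raise Conflict('bucket not yet empty')


    RetryErrors(Conflict)(delete)(force=True)
    assert _attempts == [True, True]  # kwargs re-passed on each try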