From 981dd4a32660c4cf991764ee113334487eb96627 Mon Sep 17 00:00:00 2001 From: Thomas Schultz Date: Wed, 25 Jan 2017 16:51:01 -0500 Subject: [PATCH 1/2] Vision support for batch processing part one. --- vision/google/cloud/vision/_gax.py | 16 +++++++------- vision/google/cloud/vision/_http.py | 16 +++++++------- vision/unit_tests/_fixtures.py | 33 +++++++++++++++++++++++++++++ vision/unit_tests/test__gax.py | 4 ++-- vision/unit_tests/test__http.py | 16 +++++++++++--- 5 files changed, 64 insertions(+), 21 deletions(-) diff --git a/vision/google/cloud/vision/_gax.py b/vision/google/cloud/vision/_gax.py index 755840e106ac..bef20f9d8049 100644 --- a/vision/google/cloud/vision/_gax.py +++ b/vision/google/cloud/vision/_gax.py @@ -39,8 +39,8 @@ def annotate(self, image, features): :type features: list :param features: List of :class:`~google.cloud.vision.feature.Feature`. - :rtype: :class:`~google.cloud.vision.annotations.Annotations` - :returns: Instance of ``Annotations`` with results or ``None``. + :rtype: :class:`~google.cloud.vision.annotations.Annotations` or list + :returns: Instance of ``Annotations`` or list of ``Annotations``. 
""" gapic_features = [_to_gapic_feature(feature) for feature in features] gapic_image = _to_gapic_image(image) @@ -48,12 +48,12 @@ def annotate(self, image, features): image=gapic_image, features=gapic_features) requests = [request] annotator_client = self._annotator_client - images = annotator_client.batch_annotate_images(requests) - if len(images.responses) == 1: - return Annotations.from_pb(images.responses[0]) - elif len(images.responses) > 1: - raise NotImplementedError( - 'Multiple image processing is not yet supported.') + responses = annotator_client.batch_annotate_images(requests).responses + if len(responses) == 1: + return Annotations.from_pb(responses[0]) + elif len(responses) > 1: + return [Annotations.from_pb(response) + for response in responses] def _to_gapic_feature(feature): diff --git a/vision/google/cloud/vision/_http.py b/vision/google/cloud/vision/_http.py index 348588693a61..b1347fdc3e4e 100644 --- a/vision/google/cloud/vision/_http.py +++ b/vision/google/cloud/vision/_http.py @@ -41,20 +41,20 @@ def annotate(self, image, features): based on the number of Feature Types. See: https://cloud.google.com/vision/docs/pricing - :rtype: dict - :returns: List of annotations. + :rtype: list or class:`~googe.cloud.vision.annotations.Annotations` + :returns: Instance of ``Annotations`` or List of ``Annotations``. 
""" request = _make_request(image, features) data = {'requests': [request]} api_response = self._connection.api_request( method='POST', path='/images:annotate', data=data) - images = api_response.get('responses') - if len(images) == 1: - return Annotations.from_api_repr(images[0]) - elif len(images) > 1: - raise NotImplementedError( - 'Multiple image processing is not yet supported.') + responses = api_response.get('responses') + if len(responses) == 1: + return Annotations.from_api_repr(responses[0]) + elif len(responses) > 1: + return [Annotations.from_api_repr(response) + for response in responses] def _make_request(image, features): diff --git a/vision/unit_tests/_fixtures.py b/vision/unit_tests/_fixtures.py index 26262b442eb5..a008c66274ce 100644 --- a/vision/unit_tests/_fixtures.py +++ b/vision/unit_tests/_fixtures.py @@ -1688,6 +1688,39 @@ } +MULTIPLE_RESPONSE = { + 'responses': [ + { + 'labelAnnotations': [ + { + 'mid': '/m/0k4j', + 'description': 'automobile', + 'score': 0.9776855 + }, + { + 'mid': '/m/07yv9', + 'description': 'vehicle', + 'score': 0.947987 + }, + { + 'mid': '/m/07r04', + 'description': 'truck', + 'score': 0.88429511 + }, + ], + }, + { + 'safeSearchAnnotation': { + 'adult': 'VERY_UNLIKELY', + 'spoof': 'UNLIKELY', + 'medical': 'POSSIBLE', + 'violence': 'VERY_UNLIKELY' + }, + }, + ], +} + + SAFE_SEARCH_DETECTION_RESPONSE = { 'responses': [ { diff --git a/vision/unit_tests/test__gax.py b/vision/unit_tests/test__gax.py index 1e1dfbeaf7e6..70d7a83a2715 100644 --- a/vision/unit_tests/test__gax.py +++ b/vision/unit_tests/test__gax.py @@ -102,9 +102,9 @@ def test_annotate_multiple_results(self): gax_api._annotator_client = mock.Mock( spec_set=['batch_annotate_images'], **mock_response) with mock.patch('google.cloud.vision._gax.Annotations'): - with self.assertRaises(NotImplementedError): - gax_api.annotate(image, [feature]) + responses = gax_api.annotate(image, [feature]) + self.assertEqual(len(responses), 2) 
gax_api._annotator_client.batch_annotate_images.assert_called() diff --git a/vision/unit_tests/test__http.py b/vision/unit_tests/test__http.py index b875a77db2d8..9481aeb14ff4 100644 --- a/vision/unit_tests/test__http.py +++ b/vision/unit_tests/test__http.py @@ -50,6 +50,8 @@ def test_call_annotate_with_more_than_one_result(self): from google.cloud.vision.feature import Feature from google.cloud.vision.feature import FeatureTypes from google.cloud.vision.image import Image + from google.cloud.vision.likelihood import Likelihood + from unit_tests._fixtures import MULTIPLE_RESPONSE client = mock.Mock(spec_set=['_connection']) feature = Feature(FeatureTypes.LABEL_DETECTION, 5) @@ -58,9 +60,17 @@ def test_call_annotate_with_more_than_one_result(self): http_api = self._make_one(client) http_api._connection = mock.Mock(spec_set=['api_request']) - http_api._connection.api_request.return_value = {'responses': [1, 2]} - with self.assertRaises(NotImplementedError): - http_api.annotate(image, [feature]) + http_api._connection.api_request.return_value = MULTIPLE_RESPONSE + responses = http_api.annotate(image, [feature]) + + self.assertEqual(len(responses), 2) + image_one = responses[0] + image_two = responses[1] + self.assertEqual(len(image_one.labels), 3) + self.assertIsInstance(image_one.safe_searches, tuple) + self.assertEqual(image_two.safe_searches.adult, + Likelihood.VERY_UNLIKELY) + self.assertEqual(len(image_two.labels), 0) class TestVisionRequest(unittest.TestCase): From 002389c374a9940345d9d22e27e4f74f9c93b0d4 Mon Sep 17 00:00:00 2001 From: Thomas Schultz Date: Mon, 30 Jan 2017 11:57:22 -0500 Subject: [PATCH 2/2] Feedback updates. 
--- vision/google/cloud/vision/_gax.py | 11 ++++------- vision/google/cloud/vision/_http.py | 10 +++------- vision/google/cloud/vision/image.py | 14 +++++++------- vision/unit_tests/test__gax.py | 23 ++++++++++++++++------- vision/unit_tests/test__http.py | 4 +++- vision/unit_tests/test_client.py | 8 ++++++-- 6 files changed, 39 insertions(+), 31 deletions(-) diff --git a/vision/google/cloud/vision/_gax.py b/vision/google/cloud/vision/_gax.py index bef20f9d8049..e9eeaf33ab10 100644 --- a/vision/google/cloud/vision/_gax.py +++ b/vision/google/cloud/vision/_gax.py @@ -39,8 +39,9 @@ def annotate(self, image, features): :type features: list :param features: List of :class:`~google.cloud.vision.feature.Feature`. - :rtype: :class:`~google.cloud.vision.annotations.Annotations` or list - :returns: Instance of ``Annotations`` or list of ``Annotations``. + :rtype: list + :returns: List of + :class:`~google.cloud.vision.annotations.Annotations`. """ gapic_features = [_to_gapic_feature(feature) for feature in features] gapic_image = _to_gapic_image(image) @@ -49,11 +50,7 @@ def annotate(self, image, features): requests = [request] annotator_client = self._annotator_client responses = annotator_client.batch_annotate_images(requests).responses - if len(responses) == 1: - return Annotations.from_pb(responses[0]) - elif len(responses) > 1: - return [Annotations.from_pb(response) - for response in responses] + return [Annotations.from_pb(response) for response in responses] def _to_gapic_feature(feature): diff --git a/vision/google/cloud/vision/_http.py b/vision/google/cloud/vision/_http.py index b1347fdc3e4e..5846a2817519 100644 --- a/vision/google/cloud/vision/_http.py +++ b/vision/google/cloud/vision/_http.py @@ -41,8 +41,8 @@ def annotate(self, image, features): based on the number of Feature Types. 
See: https://cloud.google.com/vision/docs/pricing - :rtype: list or class:`~googe.cloud.vision.annotations.Annotations` - :returns: Instance of ``Annotations`` or List of ``Annotations``. + :rtype: list + :returns: List of :class:`~google.cloud.vision.annotations.Annotations`. """ request = _make_request(image, features) @@ -50,11 +50,7 @@ api_response = self._connection.api_request( method='POST', path='/images:annotate', data=data) responses = api_response.get('responses') - if len(responses) == 1: - return Annotations.from_api_repr(responses[0]) - elif len(responses) > 1: - return [Annotations.from_api_repr(response) - for response in responses] + return [Annotations.from_api_repr(response) for response in responses] def _make_request(image, features): diff --git a/vision/google/cloud/vision/image.py b/vision/google/cloud/vision/image.py index 9283e83ba960..87bf86e2f7e4 100644 --- a/vision/google/cloud/vision/image.py +++ b/vision/google/cloud/vision/image.py @@ -134,7 +134,7 @@ def detect_faces(self, limit=10): """ features = [Feature(FeatureTypes.FACE_DETECTION, limit)] annotations = self._detect_annotation(features) - return annotations.faces + return annotations[0].faces def detect_labels(self, limit=10): """Detect labels that describe objects in an image. @@ -147,7 +147,7 @@ def detect_labels(self, limit=10): """ features = [Feature(FeatureTypes.LABEL_DETECTION, limit)] annotations = self._detect_annotation(features) - return annotations.labels + return annotations[0].labels def detect_landmarks(self, limit=10): """Detect landmarks in an image. @@ -161,7 +161,7 @@ def detect_landmarks(self, limit=10): """ features = [Feature(FeatureTypes.LANDMARK_DETECTION, limit)] annotations = self._detect_annotation(features) - return annotations.landmarks + return annotations[0].landmarks def detect_logos(self, limit=10): """Detect logos in an image. 
@@ -175,7 +175,7 @@ def detect_logos(self, limit=10): """ features = [Feature(FeatureTypes.LOGO_DETECTION, limit)] annotations = self._detect_annotation(features) - return annotations.logos + return annotations[0].logos def detect_properties(self, limit=10): """Detect the color properties of an image. @@ -189,7 +189,7 @@ def detect_properties(self, limit=10): """ features = [Feature(FeatureTypes.IMAGE_PROPERTIES, limit)] annotations = self._detect_annotation(features) - return annotations.properties + return annotations[0].properties def detect_safe_search(self, limit=10): """Retreive safe search properties from an image. @@ -203,7 +203,7 @@ def detect_safe_search(self, limit=10): """ features = [Feature(FeatureTypes.SAFE_SEARCH_DETECTION, limit)] annotations = self._detect_annotation(features) - return annotations.safe_searches + return annotations[0].safe_searches def detect_text(self, limit=10): """Detect text in an image. @@ -217,4 +217,4 @@ def detect_text(self, limit=10): """ features = [Feature(FeatureTypes.TEXT_DETECTION, limit)] annotations = self._detect_annotation(features) - return annotations.texts + return annotations[0].texts diff --git a/vision/unit_tests/test__gax.py b/vision/unit_tests/test__gax.py index 70d7a83a2715..8e52b166e394 100644 --- a/vision/unit_tests/test__gax.py +++ b/vision/unit_tests/test__gax.py @@ -78,11 +78,15 @@ def test_annotate_no_results(self): gax_api._annotator_client = mock.Mock( spec_set=['batch_annotate_images'], **mock_response) with mock.patch('google.cloud.vision._gax.Annotations'): - self.assertIsNone(gax_api.annotate(image, [feature])) + response = gax_api.annotate(image, [feature]) + self.assertEqual(len(response), 0) + self.assertIsInstance(response, list) gax_api._annotator_client.batch_annotate_images.assert_called() def test_annotate_multiple_results(self): + from google.cloud.grpc.vision.v1 import image_annotator_pb2 + from google.cloud.vision.annotations import Annotations from google.cloud.vision.feature 
import Feature from google.cloud.vision.feature import FeatureTypes from google.cloud.vision.image import Image @@ -95,16 +99,21 @@ def test_annotate_multiple_results(self): 'ImageAnnotatorClient'): gax_api = self._make_one(client) - mock_response = { - 'batch_annotate_images.return_value': mock.Mock(responses=[1, 2]), - } + responses = [ + image_annotator_pb2.AnnotateImageResponse(), + image_annotator_pb2.AnnotateImageResponse(), + ] + response = image_annotator_pb2.BatchAnnotateImagesResponse( + responses=responses) gax_api._annotator_client = mock.Mock( - spec_set=['batch_annotate_images'], **mock_response) - with mock.patch('google.cloud.vision._gax.Annotations'): - responses = gax_api.annotate(image, [feature]) + spec_set=['batch_annotate_images']) + gax_api._annotator_client.batch_annotate_images.return_value = response + responses = gax_api.annotate(image, [feature]) self.assertEqual(len(responses), 2) + self.assertIsInstance(responses[0], Annotations) + self.assertIsInstance(responses[1], Annotations) gax_api._annotator_client.batch_annotate_images.assert_called() diff --git a/vision/unit_tests/test__http.py b/vision/unit_tests/test__http.py index 9481aeb14ff4..9293820915e6 100644 --- a/vision/unit_tests/test__http.py +++ b/vision/unit_tests/test__http.py @@ -44,7 +44,9 @@ def test_call_annotate_with_no_results(self): http_api = self._make_one(client) http_api._connection = mock.Mock(spec_set=['api_request']) http_api._connection.api_request.return_value = {'responses': []} - self.assertIsNone(http_api.annotate(image, [feature])) + response = http_api.annotate(image, [feature]) + self.assertEqual(len(response), 0) + self.assertIsInstance(response, list) def test_call_annotate_with_more_than_one_result(self): from google.cloud.vision.feature import Feature diff --git a/vision/unit_tests/test_client.py b/vision/unit_tests/test_client.py index f3f972c1f6cc..1224dabd0dea 100644 --- a/vision/unit_tests/test_client.py +++ b/vision/unit_tests/test_client.py @@ 
-104,8 +104,10 @@ def test_face_annotation(self): features = [Feature(feature_type=FeatureTypes.FACE_DETECTION, max_results=3)] image = client.image(content=IMAGE_CONTENT) - response = client._vision_api.annotate(image, features) + api_response = client._vision_api.annotate(image, features) + self.assertEqual(len(api_response), 1) + response = api_response[0] self.assertEqual(REQUEST, client._connection._requested[0]['data']) self.assertIsInstance(response, Annotations) @@ -166,8 +168,10 @@ def test_multiple_detection_from_content(self): logo_feature = Feature(FeatureTypes.LOGO_DETECTION, limit) features = [label_feature, logo_feature] image = client.image(content=IMAGE_CONTENT) - items = image.detect(features) + detected_items = image.detect(features) + self.assertEqual(len(detected_items), 1) + items = detected_items[0] self.assertEqual(len(items.logos), 2) self.assertEqual(len(items.labels), 3) first_logo = items.logos[0]