|
| 1 | +import sys |
| 2 | +from abc import ABC, abstractmethod |
| 3 | +from dataclasses import dataclass |
| 4 | +from typing import Iterable |
| 5 | + |
| 6 | +from nucleus.annotation import AnnotationList |
| 7 | +from nucleus.prediction import PredictionList |
| 8 | + |
| 9 | + |
@dataclass
class MetricResult:
    """The value of a single metric evaluation together with its weight.

    The weight is useful when aggregating metrics where each dataset item may
    hold a different relative weight. For example, when calculating precision
    over a dataset, the denominator of the precision is the number of
    annotations, so the weight can be set to the number of annotations.

    Attributes:
        value (float): The value of the evaluation result.
        weight (float): The relative weight of this result when aggregating.
            Defaults to 1.0 (a plain average).
    """

    value: float
    weight: float = 1.0

    @staticmethod
    def aggregate(results: Iterable["MetricResult"]) -> "MetricResult":
        """Combine results into a single result via a weighted average.

        Zero-weight results are excluded from the average. An empty (or
        all-zero-weight) input yields ``MetricResult(0.0, 0)`` rather than
        raising ``ZeroDivisionError``.
        """
        # Drop zero-weight entries so they cannot skew the denominator.
        weighted = [result for result in results if result.weight != 0]
        total_weight = sum(result.weight for result in weighted)
        total_value = sum(result.value * result.weight for result in weighted)
        # Epsilon guard: division stays well-defined when total_weight == 0.
        value = total_value / max(total_weight, sys.float_info.epsilon)
        return MetricResult(value, total_weight)
| 34 | + |
| 35 | + |
class Metric(ABC):
    """Abstract base class for metrics mapping annotations and predictions
    to a scalar result.

    Subclasses implement `__call__` with the logic that compares a list of
    annotations against a list of predictions and produces a `MetricResult`. ::

        from nucleus import BoxAnnotation, CuboidPrediction, Point3D
        from nucleus.annotation import AnnotationList
        from nucleus.prediction import PredictionList
        from nucleus.metrics import Metric, MetricResult
        from nucleus.metrics.polygon_utils import BoxOrPolygonAnnotation, BoxOrPolygonPrediction

        class MyMetric(Metric):
            def __call__(
                self, annotations: AnnotationList, predictions: PredictionList
            ) -> MetricResult:
                value = (len(annotations) - len(predictions)) ** 2
                weight = len(annotations)
                return MetricResult(value, weight)

        box = BoxAnnotation(
            label="car",
            x=0,
            y=0,
            width=10,
            height=10,
            reference_id="image_1",
            annotation_id="image_1_car_box_1",
            metadata={"vehicle_color": "red"}
        )

        cuboid = CuboidPrediction(
            label="car",
            position=Point3D(100, 100, 10),
            dimensions=Point3D(5, 10, 5),
            yaw=0,
            reference_id="pointcloud_1",
            confidence=0.8,
            annotation_id="pointcloud_1_car_cuboid_1",
            metadata={"vehicle_color": "green"}
        )

        metric = MyMetric()
        annotations = AnnotationList(box_annotations=[box])
        predictions = PredictionList(cuboid_predictions=[cuboid])
        metric(annotations, predictions)
    """

    @abstractmethod
    def __call__(
        self, annotations: AnnotationList, predictions: PredictionList
    ) -> MetricResult:
        """Evaluate the metric on the given annotations and predictions.

        Concrete subclasses must override this and return a `MetricResult`.
        """
0 commit comments