diff --git a/src/od_metrics/od_metrics.py b/src/od_metrics/od_metrics.py index 9051eb9..76d7231 100644 --- a/src/od_metrics/od_metrics.py +++ b/src/od_metrics/od_metrics.py @@ -16,7 +16,7 @@ import numpy as np from .constants import DEFAULT_COCO, _STANDARD_OUTPUT -from .utils import to_array, get_indexes, get_suffix, _Missing +from .utils import get_indexes, get_suffix, _Missing from .validators import ConstructorModel, ComputeModel, MeanModel @@ -899,8 +899,8 @@ def _get_mean( # Default default_value = { "iou_threshold": self.iou_thresholds, - "label_id": to_array(label_ids), - "area_range_key": to_array(list(self.area_ranges.keys())), + "label_id": np.array(label_ids), + "area_range_key": np.array(list(self.area_ranges.keys())), "max_detection_threshold": self.max_detection_thresholds, } diff --git a/src/od_metrics/utils.py b/src/od_metrics/utils.py index 202e5d1..6882b3a 100644 --- a/src/od_metrics/utils.py +++ b/src/od_metrics/utils.py @@ -4,12 +4,11 @@ __all__ = [ "_Missing", - "to_array", "get_indexes", "get_suffix", ] -from typing import Literal, Any +from typing import Literal import numpy as np @@ -17,32 +16,6 @@ class _Missing: """Sentinel class for missing values.""" -def to_array( - input_: Any, - ) -> np.ndarray: - """ - Trasform input to `np.ndarray`. - - Parameters - ---------- - input_ : Any | None, optional - Input to be converted. - - Returns - ------- - np.ndarray - Input converted to `np.ndarray`. - """ - if not isinstance(input_, np.ndarray): - output = np.array(input_) - else: - output = input_ - - if output.ndim == 0: - output = output.reshape(-1) - return output - - def get_indexes( array1: np.ndarray, array2: np.ndarray @@ -145,8 +118,10 @@ def to_xywh( return xyxy_xywh(bbox) if box_format == "cxcywh": return cxcywh_xywh(bbox) - raise ValueError("`box_format` can be `'xyxy'`, `'xywh'`, `'cxcywh'`. " - f"Found {box_format}") + raise ValueError( # pragma: no cover + "`box_format` can be `'xyxy'`, `'xywh'`, `'cxcywh'`. " + f"Found {box_format}" + ) def get_suffix( diff --git a/src/od_metrics/validators.py b/src/od_metrics/validators.py index efb0f56..fca0e38 100644 --- a/src/od_metrics/validators.py +++ b/src/od_metrics/validators.py @@ -227,8 +227,8 @@ def iou_recall_validator( or "default_value" not in info.context or info.field_name is None ): - raise ValueError("Missing required context or field name " - "information.") + raise ValueError( # pragma: no cover + "Missing required context or field name information.") return _common_validator( name=info.field_name, @@ -271,8 +271,8 @@ def max_detection_validator( or "default_value" not in info.context or info.field_name is None ): - raise ValueError("Missing required context or field name " - "information.") + raise ValueError( # pragma: no cover + "Missing required context or field name information.") return _common_validator( name=info.field_name, @@ -315,8 +315,8 @@ def area_ranges_validator( or "default_value" not in info.context or info.field_name is None ): - raise ValueError("Missing required context or field name " - "information.") + raise ValueError( # pragma: no cover + "Missing required context or field name information.") return _area_ranges_validator( name=info.field_name, @@ -680,8 +680,8 @@ def annotation_parser( Ground truth or predictions annotations. 
""" if info.context is None or "box_format" not in info.context: - raise ValueError("Missing required context or `box_format` " - "information.") + raise ValueError( # pragma: no cover + "Missing required context or `box_format` information.") box_format = info.context["box_format"] # y_true @@ -750,8 +750,8 @@ def iou_threshold_validator( or "default_value" not in info.context or info.field_name is None ): - raise ValueError("Missing required context or field name " - "information.") + raise ValueError( # pragma: no cover + "Missing required context or field name information.") return _common_validator( name=info.field_name, @@ -789,8 +789,8 @@ def area_range_key_validator( or "default_value" not in info.context or info.field_name is None ): - raise ValueError("Missing required context or field name " - "information.") + raise ValueError( # pragma: no cover + "Missing required context or field name information.") return _common_validator( name=info.field_name, @@ -828,8 +828,8 @@ def max_detection_label_id_validator( or "default_value" not in info.context or info.field_name is None ): - raise ValueError("Missing required context or field name " - "information.") + raise ValueError( # pragma: no cover + "Missing required context or field name information.") return _common_validator( name=info.field_name, diff --git a/tests/config.py b/tests/config.py index ff5b0ea..31e4111 100644 --- a/tests/config.py +++ b/tests/config.py @@ -678,10 +678,33 @@ }, ] -misc_tests = [ + +annotations_tests = [ { "compute_settings": {"extended_summary": True}, - "ids": "default_COCO", + "y_true": [ + {"labels": [0, 2], + "boxes": np.array([[17, 83, 97, 47], [57, 86, 96, 73]])} + ], + "y_pred": [ + {"labels": [0, 2], + "boxes": [[17, 83, 97, 47], [57, 86, 96, 73]], "scores": [.2, .3]} + ], + "ids": "annotations_boxes_numpy_array" + }, + { + "compute_settings": {"extended_summary": True}, + "y_true": [ + {"labels": [0, 2], + "boxes": np.array([[17, 83, 97, 47], [57, 86, 96, 73]]), + "area": np.array([4559, 7008]), + } + ], + "y_pred": [ + {"labels": [0, 2], + "boxes": [[17, 83, 97, 47], [57, 86, 96, 73]], "scores": [.2, .3]} + ], + "ids": "annotations_area_numpy_array" }, { "compute_settings": {"extended_summary": True}, @@ -689,7 +712,7 @@ "y_true": {"n_classes": 3}, "y_pred": {"n_classes": 7}, }, - "ids": "misc_default_COCO_different_classes_y_true_y_pred" + "ids": "annotations_exception_different_classes_y_true_y_pred" }, { "compute_settings": {"extended_summary": True}, @@ -698,7 +721,68 @@ "y_pred": {"n_images": 5}, }, "exceptions": {"compute": ValidationError}, - "ids": "misc_exception_compute_different_images" + "ids": "annotations_exception_different_images" + }, + { + "compute_settings": {"extended_summary": True}, + "y_true": [ + {"labels": [0], + "boxes": [[17, 83, 97, 47], [57, 86, 96, 73]]} + ], + "y_pred": [ + {"labels": [0, 2], + "boxes": [[17, 83, 97, 47], [57, 86, 96, 73]], "scores": [.2, .3]} + ], + "exceptions": {"compute": ValidationError}, + "ids": "annotations_exception_different_attributes_length" + }, + { + "compute_settings": {"extended_summary": True}, + "y_true": [ + {"labels": [0, 2], + "boxes": [[17, 83, 97], [57, 86, 96, 73]]} + ], + "y_pred": [ + {"labels": [0, 2], + "boxes": [[17, 83, 97, 47], [57, 86, 96, 73]], "scores": [.2, .3]} + ], + "exceptions": {"compute": ValidationError}, + "to_cover": {"pycoco_converter": False}, + "ids": "annotations_exception_boxes_length" + }, + { + "compute_settings": {"extended_summary": True}, + "y_true": [ + {"labels": [0, 2]} + ], + "y_pred": [ + 
{"labels": [0, 2], + "boxes": [[17, 83, 97, 47], [57, 86, 96, 73]], "scores": [.2, .3]} + ], + "exceptions": {"compute": ValidationError}, + "to_cover": {"pycoco_converter": False, "box_format_converter": False}, + "ids": "annotations_exception_ytrue_no_boxes" + }, + { + "compute_settings": {"extended_summary": True}, + "y_true": [ + {"labels": [0, 2], + "boxes": [[17, 83, 97, 47], [57, 86, 96, 73]]} + ], + "y_pred": [ + {"labels": [0, 2], "scores": [.2, .3]} + ], + "exceptions": {"compute": ValidationError}, + "to_cover": {"pycoco_converter": False, "box_format_converter": False}, + "ids": "annotations_exception_ypred_no_boxes" + }, + ] + + +misc_tests = [ + { + "compute_settings": {"extended_summary": True}, + "ids": "default_COCO", }, { "compute_settings": {"extended_summary": "yes"}, @@ -717,6 +801,7 @@ + objects_number_tests + objects_size_tests + mean_evaluator_tests + + annotations_tests + misc_tests ) @@ -750,4 +835,8 @@ "exceptions", {} ) + test_tmp["to_cover"] = test_tmp.get( + "to_cover", + {} + ) TESTS.append(test_tmp) diff --git a/tests/test_odmetrics.py b/tests/test_odmetrics.py index 4f8c508..d118d79 100644 --- a/tests/test_odmetrics.py +++ b/tests/test_odmetrics.py @@ -5,13 +5,14 @@ import unittest import copy from typing import Any, Literal +from functools import partial import numpy as np from parameterized import parameterized, parameterized_class from src.od_metrics import ODMetrics, iou from src.od_metrics.constants import DEFAULT_COCO from tests.utils import annotations_generator, pycoco_converter, \ - test_equality, rename_dict, xywh_to + test_equality, rename_dict, xywh_to, apply_function from tests.config import TESTS try: @@ -19,9 +20,11 @@ from pycocotools import mask as maskUtils from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval -except ImportError: - print("This unittest needs `pycocotools`. Please intall by " - "running `pip install pycocotools`") +except ImportError: # pragma: no cover + print( # pragma: no cover + "This unittest needs `pycocotools`. 
Please intall by " + "running `pip install pycocotools`" + ) @parameterized_class(TESTS) @@ -33,6 +36,9 @@ class TestBaseODMetrics(unittest.TestCase): annotations_settings: dict mean_evaluator_settings: dict exceptions: dict + y_true: list | None + y_pred: list | None + to_cover: dict def get_pycoco_params( self, @@ -130,10 +136,16 @@ def _test_summary( def test_equivalence(self) -> None: """Test equivalence: `od_metrics.ODMetrics` class and `pycocotools`.""" # Get annotations - y_true_od_metrics = annotations_generator( - **self.annotations_settings["y_true"]) - y_pred_od_metrics = annotations_generator( - **self.annotations_settings["y_pred"], include_score=True) + if getattr(self, "y_true", None): + y_true_od_metrics = self.y_true + else: + y_true_od_metrics = annotations_generator( + **self.annotations_settings["y_true"]) + if getattr(self, "y_pred", None): + y_pred_od_metrics = self.y_pred + else: + y_pred_od_metrics = annotations_generator( + **self.annotations_settings["y_pred"], include_score=True) # max detections: Only used for max_detections_thresholds=None case real_max_detections = ( max(detect["boxes"].shape[0] for detect in y_pred_od_metrics) @@ -143,8 +155,12 @@ def test_equivalence(self) -> None: ) # Prepare pycoco annotations - y_true_pycoco = pycoco_converter(y_true_od_metrics) - y_pred_pycoco = pycoco_converter(y_pred_od_metrics) + if self.to_cover.get("pycoco_converter", True): + y_true_pycoco = pycoco_converter(y_true_od_metrics) + y_pred_pycoco = pycoco_converter(y_pred_od_metrics) + else: + y_true_pycoco = None + y_pred_pycoco = None # Run Od_metrics evaluation # Init @@ -156,18 +172,16 @@ def test_equivalence(self) -> None: od_metrics_obj = ODMetrics(**self.metrics_settings) return # Box format - y_true_od_metrics = [ - ann | { - "boxes": [ - list(xywh_to(box, od_metrics_obj.box_format)) - for box in ann["boxes"]]} for ann in y_true_od_metrics - ] - y_pred_od_metrics = [ - ann | { - "boxes": [ - list(xywh_to(box, od_metrics_obj.box_format)) - for box in ann["boxes"]]} for ann in y_pred_od_metrics - ] + if self.to_cover.get("box_format_converter", True): + convert_fn = partial(xywh_to, box_format=od_metrics_obj.box_format) + y_true_od_metrics = [ + ann | {"boxes": apply_function(ann["boxes"], convert_fn)} + for ann in y_true_od_metrics + ] + y_pred_od_metrics = [ + ann | {"boxes": apply_function(ann["boxes"], convert_fn)} + for ann in y_pred_od_metrics + ] # Compute _compute_exception = self.exceptions.get("compute", None) @@ -208,58 +222,29 @@ def test_equivalence(self) -> None: pycoco_obj.summarize() # Test IoUs equivalence - _iou_excpetion = self.exceptions.get("iou", None) - if _iou_excpetion is None: - with self.subTest("Test IoU"): - self.assertTrue(self._test_ious( - od_metrics_ious=od_metrics_output["IoU"], - pycoco_ious=pycoco_obj.ious - ) + with self.subTest("Test IoU"): + self.assertTrue(self._test_ious( + od_metrics_ious=od_metrics_output["IoU"], + pycoco_ious=pycoco_obj.ious ) - else: - with self.assertRaises(_iou_excpetion): - with self.subTest("Test IoU"): - self._test_ious( - od_metrics_ious=od_metrics_output["IoU"], - pycoco_ious=pycoco_obj.ious - ) - return + ) # Test aggregate equivalence - _aggregate_exception = self.exceptions.get("aggregate", None) - if _aggregate_exception is None: - with self.subTest("Test aggregate"): - self.assertTrue(self._test_aggregate( - od_metrics_output=od_metrics_output, - pycoco_eval=pycoco_obj.eval - ) + with self.subTest("Test aggregate"): + self.assertTrue(self._test_aggregate( + 
od_metrics_output=od_metrics_output, + pycoco_eval=pycoco_obj.eval ) - else: - with self.assertRaises(_aggregate_exception): - self._test_aggregate( - od_metrics_output=od_metrics_output, - pycoco_eval=pycoco_obj.eval - ) - return + ) # Test summary equivalence - _summarize_exception = self.exceptions.get("summarize", None) - if _summarize_exception is None: - with self.subTest("Test summarize"): - self.assertTrue(self._test_summary( - od_metrics_output=od_metrics_output, - pycoco_stats=pycoco_obj.stats, - is_default_coco=is_default_coco, - ) + with self.subTest("Test summarize"): + self.assertTrue(self._test_summary( + od_metrics_output=od_metrics_output, + pycoco_stats=pycoco_obj.stats, + is_default_coco=is_default_coco, ) - else: - with self.assertRaises(_summarize_exception): - self._test_summary( - od_metrics_output=od_metrics_output, - pycoco_stats=pycoco_obj.stats, - is_default_coco=is_default_coco, - ) - return + ) # Test mean evalautor _mean_evaluator_exception = self.exceptions.get("mean_evaluator", None) diff --git a/tests/utils.py b/tests/utils.py index 8c1d69d..3339e04 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -8,9 +8,10 @@ "test_equality", "rename_dict", "xywh_to", + "apply_function", ] -from typing import Literal +from typing import Literal, Callable import random import numpy as np @@ -199,12 +200,13 @@ def test_equality( """ try: if isinstance(input1, np.int64): - input1 = int(input1) + input1 = int(input1) # pragma: no cover if isinstance(input2, np.int64): - input2 = int(input2) + input2 = int(input2) # pragma: no cover if not isinstance(input1, type(input2)): - raise ValueError(f"Found: {type(input1)} and {type(input2)}") + raise ValueError( # pragma: no cover + f"Found: {type(input1)} and {type(input2)}") checks = [] if isinstance(input1, np.ndarray) and isinstance(input2, np.ndarray): @@ -220,11 +222,11 @@ def test_equality( "length.") for el1, el2 in zip(input1, input2): - checks.append(test_equality(el1, el2)) - else: - checks.append(input1 == input2) - except AssertionError: - checks = [False] + checks.append(test_equality(el1, el2)) # pragma: no cover + else: # pragma: no cover + checks.append(input1 == input2) # pragma: no cover + except AssertionError: # pragma: no cover + checks = [False] # pragma: no cover return all(checks) @@ -330,5 +332,41 @@ def xywh_to( return xywh_xyxy(bbox) if box_format == "cxcywh": return xywh_cxcywh(bbox) - raise ValueError("`box_format` can be `'xyxy'`, `'xywh'`, `'cxcywh'`. " - f"Found {box_format}") + raise ValueError( # pragma: no cover + "`box_format` can be `'xyxy'`, `'xywh'`, `'cxcywh'`. " + f"Found {box_format}" + ) + + +def apply_function( + x: list | np.ndarray, + func: Callable, + ) -> list | np.ndarray: + """ + Apply a function to every element of a `list` or `np.ndarray`. + + Parameters + ---------- + x : list | np.ndarray + Input data. It can be a `list` or `np.ndarray`. + func : Callable + Function to apply to every element of input `x`. + + Raises + ------ + TypeError + If input is neither a `list` or `np.ndarray`. + + Returns + ------- + list | np.ndarray + Input with function applied to every element of `x`. + """ + if isinstance(x, list): + return [func(elem) for elem in x] + if isinstance(x, np.ndarray): + return np.array([func(elem) for elem in x]) + raise TypeError( # pragma: no cover + "Type not supported. Supported types are: `list` or" + f"`np.ndarray`. Found: {type(x)}" + )
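
Note on the new `apply_function` test helper: the box-format conversion in `test_equivalence` now binds the target format once with `functools.partial` and maps `xywh_to` over whichever container holds the boxes. A minimal sketch of that usage, assuming the repository checkout is on the path so `tests.utils` is importable, and an illustrative target format of `"xyxy"`:

from functools import partial

import numpy as np

from tests.utils import apply_function, xywh_to

# Two boxes in COCO `xywh` format, stored as an array, as in the new
# `annotations_boxes_numpy_array` config entry.
boxes = np.array([[17, 83, 97, 47], [57, 86, 96, 73]])

# Bind the target format once, then map the converter over the container.
# `apply_function` preserves the container type: list in, list out;
# ndarray in, ndarray out.
convert_fn = partial(xywh_to, box_format="xyxy")
converted = apply_function(boxes, convert_fn)
print(converted)  # boxes converted to `xyxy`, still an np.ndarray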
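
The new `annotations_tests` entries exercise the public API with mixed containers (NumPy arrays and plain lists) for `boxes` and `area`. A rough sketch of what the `annotations_boxes_numpy_array` case boils down to; the `ODMetrics().compute(y_true, y_pred, extended_summary=True)` call pattern is assumed from how this test suite drives the library, and `"IoU"` is the extended-summary output key the equivalence test reads:

import numpy as np

from od_metrics import ODMetrics

# Ground truth with boxes as an np.ndarray, predictions with plain lists,
# mirroring the `annotations_boxes_numpy_array` config entry.
y_true = [
    {"labels": [0, 2],
     "boxes": np.array([[17, 83, 97, 47], [57, 86, 96, 73]])},
]
y_pred = [
    {"labels": [0, 2],
     "boxes": [[17, 83, 97, 47], [57, 86, 96, 73]],
     "scores": [.2, .3]},
]

metrics = ODMetrics()  # default COCO settings
output = metrics.compute(y_true, y_pred, extended_summary=True)
print(output["IoU"])  # per-image/per-label IoU matrices compared against pycocotools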