diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index f4439fd..73d7075 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -24,19 +24,3 @@ jobs: - name: Run Pre-Commit run: |- pre-commit run --all-files - pylint: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: actions/setup-python@v2 - with: - python-version: "3.8" - - name: Upgrade pip - run: |- - python -m pip install -U pip - - name: Install pylint - run: |- - python -m pip install pylint - - name: Run pylint - run: |- - pylint -dfixme raillabel_providerkit || exit $(($? & ~24)) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 1a3b919..281033c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -25,27 +25,16 @@ repos: - id: end-of-file-fixer - id: fix-byte-order-marker - id: trailing-whitespace - - repo: https://github.com/psf/black - rev: 24.10.0 - hooks: - - id: black - - repo: https://github.com/PyCQA/isort - rev: 5.13.2 - hooks: - - id: isort - - repo: https://github.com/PyCQA/docformatter - rev: v1.7.5 - hooks: - - id: docformatter - additional_dependencies: - - docformatter[tomli] - - repo: https://github.com/PyCQA/pydocstyle - rev: 6.3.0 + + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.7.0 hooks: - - id: pydocstyle - exclude: '^tests/' - additional_dependencies: - - pydocstyle[toml] + - id: ruff-format + name: Run Formatter + - id: ruff + name: Run Linter + args: [ --fix ] + - repo: https://github.com/Lucas-C/pre-commit-hooks rev: v1.5.5 hooks: @@ -89,6 +78,7 @@ repos: - LICENSES/.license_header.txt - --comment-style - '..| |' + - repo: https://github.com/fsfe/reuse-tool rev: v4.0.3 hooks: diff --git a/docs/source/conf.py b/docs/source/conf.py index c2c8d9f..fce386a 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -63,12 +63,10 @@ # The full version, including alpha/beta/rc tags. version = raillabel_providerkit.__version__ -rst_epilog = """ +rst_epilog = f""" .. |Project| replace:: {project} .. 
|Version| replace:: {version} -""".format( - project=project, version=version -) +""" # -- Options for copy-button ------------------------------------------------- diff --git a/git-conventional-commits.json b/git-conventional-commits.json new file mode 100644 index 0000000..1da947a --- /dev/null +++ b/git-conventional-commits.json @@ -0,0 +1,20 @@ +{ + "convention" : { + "commitTypes": [ + "build", + "chore", + "ci", + "docs", + "feat", + "fix", + "merge", + "perf", + "refactor", + "revert", + "test", + + "lint" + ], + "commitScopes": [] + } +} diff --git a/git-conventional-commits.json.license b/git-conventional-commits.json.license new file mode 100644 index 0000000..fbb8ddf --- /dev/null +++ b/git-conventional-commits.json.license @@ -0,0 +1,2 @@ +SPDX-FileCopyrightText: Copyright DB InfraGO AG and the raillabel contributors +SPDX-License-Identifier: Apache-2.0 diff --git a/pyproject.toml b/pyproject.toml index 9f69206..c572fde 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -57,18 +57,31 @@ test = [ "json5" ] -[tool.black] -line-length = 100 -target-version = ["py38"] -force-exclude = "tests/" +[tool.ruff] +line-length = 101 -[tool.docformatter] -wrap-descriptions = 72 -wrap-summaries = 79 +[tool.ruff.lint] +exclude = ["tests/*", "docs/*"] +select = ["ALL"] +ignore = [ + "COM812", # conflicts with ruff formatter -[tool.isort] -profile = 'black' -line_length = 100 + "D100", # imo no docstrings are necessary in public modules + "D107", # __init__ docstrings are not necessary + "D203", # incompatible with D211 + "D213", # incompatible with D212 + + "FBT001", # flags in functions are not bad practice + "FBT002", # flags in functions are not bad practice + + "ISC001", # conflicts with ruff formatter + + "TCH001", # adds hard-to-understand complexity without providing a benefit for smaller projects + "TCH002", # same as TCH001 + "TCH003", # same as TCH001 + + "SIM103", # less readable in some cases imo +] [tool.mypy] check_untyped_defs = true @@ -78,82 +91,8 @@ warn_redundant_casts = true warn_unreachable = true python_version = "3.12" ignore_missing_imports = true - -[tool.pydocstyle] -convention = "numpy" -add-select = [ - "D212", # Multi-line docstring summary should start at the first line - "D402", # First line should not be the functions "signature" - "D417", # Missing argument descriptions in the docstring -] -add-ignore = [ - "D100", # Missing docstring in public module - "D201", # No blank lines allowed before function docstring # auto-formatting - "D202", # No blank lines allowed after function docstring # auto-formatting - "D203", # 1 blank line required before class docstring # auto-formatting - "D204", # 1 blank line required after class docstring # auto-formatting - "D209", # Multi-line docstring closing quotes should be on a separate line - "D211", # No blank lines allowed before class docstring # auto-formatting - "D213", # Multi-line docstring summary should start at the second line -] - -[tool.pylint.master] -max-line-length = 100 - -[tool.pylint.messages_control] -disable = [ - "arguments-renamed", - "global-statement", - "invalid-name", - "no-else-return", # using else returns is more readible imo - "protected-access", # class comparisons raised as false positive - "redefined-builtin", # the domain is full of builtin-names (object, type, format, ...) 
- "too-few-public-methods", # does not contribute to code quality imo - "too-many-arguments", # 6 as a limit is too low - "too-many-instance-attributes", # classes mirror OpenLABEL, therefore the number of fields is set - "unidiomatic-typecheck", # type() is necessary in some cases - "unspecified-encoding", # default encoding is sufficient in all cases - "unsupported-membership-test", # raise false positives for dicts - "global-variable-not-assigned", # raises false positive when global variable is a dict and items are assigned - - # Auto-formatting - "bad-indentation", - "inconsistent-quotes", - "missing-final-newline", - "missing-class-docstring", - "missing-function-docstring", - "missing-module-docstring", - "mixed-line-endings", - "multiple-imports", - "multiple-statements", - "trailing-newlines", - "trailing-whitespace", - "unexpected-line-ending-format", - "ungrouped-imports", - "wrong-import-order", - "wrong-import-position", - - # Handled by mypy - "arguments-differ", - "assignment-from-no-return", - "import-error", - "missing-kwoa", - "no-member", - "no-value-for-parameter", - "redundant-keyword-arg", - "signature-differs", - "syntax-error", - "too-many-function-args", - "unbalanced-tuple-unpacking", - "undefined-variable", - "unexpected-keyword-arg", -] -enable = [ - "c-extension-no-member", - "deprecated-pragma", - "use-symbolic-message-instead", - "useless-suppression", -] +disable_error_code = ["call-arg"] +exclude = ["tests/*"] [tool.pytest.ini_options] addopts = """ diff --git a/raillabel_providerkit/__init__.py b/raillabel_providerkit/__init__.py index 733f88e..0dd16a6 100644 --- a/raillabel_providerkit/__init__.py +++ b/raillabel_providerkit/__init__.py @@ -1,6 +1,7 @@ # Copyright DB Netz AG and contributors # SPDX-License-Identifier: Apache-2.0 """A library for annotation providers of raillabel-formatted data.""" + from importlib import metadata from . import format @@ -13,3 +14,10 @@ except metadata.PackageNotFoundError: __version__ = "0.0.0+unknown" del metadata + +__all__ = [ + "format", + "loader_classes", + "convert", + "validate", +] diff --git a/raillabel_providerkit/_util/_attribute_type.py b/raillabel_providerkit/_util/_attribute_type.py index ff8a7a6..fbe8aaa 100644 --- a/raillabel_providerkit/_util/_attribute_type.py +++ b/raillabel_providerkit/_util/_attribute_type.py @@ -1,9 +1,12 @@ # Copyright DB Netz AG and contributors # SPDX-License-Identifier: Apache-2.0 -import typing as t +from __future__ import annotations + from enum import Enum +from raillabel_providerkit.exceptions import ValueDoesNotMatchTypeError + class AttributeType(Enum): """Enum of all valid RailLabel attribute types.""" @@ -14,7 +17,7 @@ class AttributeType(Enum): VEC = "vec" @classmethod - def from_value(cls, attribute_value_class: t.Type) -> "AttributeType": + def from_value(cls, attribute_value_class: type) -> AttributeType: """Return AttributeType based on class of attribute value. Parameters @@ -31,22 +34,18 @@ def from_value(cls, attribute_value_class: t.Type) -> "AttributeType": ------ ValueError if attribute value class does not correspond to an Attribute Type. 
- """ - if attribute_value_class == str: + """ + if attribute_value_class is str: return AttributeType.TEXT - elif attribute_value_class in [float, int]: + if attribute_value_class in [float, int]: return AttributeType.NUM - elif attribute_value_class == bool: + if attribute_value_class is bool: return AttributeType.BOOLEAN - elif attribute_value_class in [list, tuple]: + if attribute_value_class in [list, tuple]: return AttributeType.VEC - else: - raise ValueError( - f"Type {attribute_value_class} does not correspond to a valid RailLabel attribute " - + "type. Supported types are str, float, int, bool, list, tuple." - ) + raise ValueDoesNotMatchTypeError(attribute_value_class) diff --git a/raillabel_providerkit/_util/_warning.py b/raillabel_providerkit/_util/_warning.py index d501e0d..7e4d054 100644 --- a/raillabel_providerkit/_util/_warning.py +++ b/raillabel_providerkit/_util/_warning.py @@ -1,16 +1,18 @@ # Copyright DB Netz AG and contributors # SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + import logging import typing as t from io import StringIO +from types import TracebackType class _WarningsLogger: + warnings: t.ClassVar[list[str]] = [] - warnings: t.List[str] = [] - - def __enter__(self) -> "_WarningsLogger": + def __enter__(self) -> None: logger = logging.getLogger("loader_warnings") warnings_stream = StringIO() handler = logging.StreamHandler(warnings_stream) @@ -19,7 +21,12 @@ def __enter__(self) -> "_WarningsLogger": return self - def __exit__(self, exc_type, exc_value, traceback): + def __exit__( + self, + typ: type[BaseException] | None, + exc: BaseException | None, + tb: TracebackType | None, + ) -> None: logger = logging.getLogger("loader_warnings") stream = logger.handlers[-1].stream stream.seek(0) diff --git a/raillabel_providerkit/convert/convert.py b/raillabel_providerkit/convert/convert.py index 74a4e46..753e5c9 100644 --- a/raillabel_providerkit/convert/convert.py +++ b/raillabel_providerkit/convert/convert.py @@ -1,16 +1,17 @@ # Copyright DB Netz AG and contributors # SPDX-License-Identifier: Apache-2.0 -import typing as t +from __future__ import annotations import raillabel -from ..exceptions import UnsupportedFormatError +from raillabel_providerkit.exceptions import UnsupportedFormatError + from . import loader_classes as loader_classes_pkg from .loader_classes import LoaderABC -def convert(data: dict, loader_class: t.Optional[t.Type[LoaderABC]] = None) -> raillabel.Scene: +def convert(data: dict, loader_class: type[LoaderABC] | None = None) -> raillabel.Scene: """Convert annotation data from provider formats into raillabel. Parameters @@ -30,23 +31,23 @@ def convert(data: dict, loader_class: t.Optional[t.Type[LoaderABC]] = None) -> r ------ raillabel.UnsupportedFormatError if the annotation file does not match any loaders. 
- """ + """ if loader_class is None: loader_class = _select_loader_class(data) return loader_class().load(data) -def _select_loader_class(data: dict) -> t.Type[LoaderABC]: - loader_classes = [] - for cls in loader_classes_pkg.__dict__.values(): - if isinstance(cls, type) and issubclass(cls, LoaderABC) and cls != LoaderABC: - loader_classes.append(cls) +def _select_loader_class(data: dict) -> type[LoaderABC]: + loader_classes = [ + cls + for cls in loader_classes_pkg.__dict__.values() + if isinstance(cls, type) and issubclass(cls, LoaderABC) and cls != LoaderABC + ] for loader_class in loader_classes: - if loader_class().supports(data): return loader_class - raise UnsupportedFormatError("No loader could be found, that supported the provided data.") + raise UnsupportedFormatError diff --git a/raillabel_providerkit/convert/loader_classes/__init__.py b/raillabel_providerkit/convert/loader_classes/__init__.py index 72a3266..2849708 100644 --- a/raillabel_providerkit/convert/loader_classes/__init__.py +++ b/raillabel_providerkit/convert/loader_classes/__init__.py @@ -9,10 +9,11 @@ from ._loader_abc import LoaderABC +__all__ = ["LoaderABC"] + # iterate through the modules in the current package package_dir = str(Path(__file__).resolve().parent) for _, module_name, _ in iter_modules([package_dir]): - # import the module and iterate through its attributes module = import_module(f"{__name__}.{module_name}") for attribute_name in dir(module): diff --git a/raillabel_providerkit/convert/loader_classes/_loader_abc.py b/raillabel_providerkit/convert/loader_classes/_loader_abc.py index 3d6eaaa..ab718e9 100644 --- a/raillabel_providerkit/convert/loader_classes/_loader_abc.py +++ b/raillabel_providerkit/convert/loader_classes/_loader_abc.py @@ -1,7 +1,8 @@ # Copyright DB Netz AG and contributors # SPDX-License-Identifier: Apache-2.0 -import typing as t +from __future__ import annotations + from abc import ABC, abstractmethod from pathlib import Path @@ -22,10 +23,11 @@ class LoaderABC(ABC): List of warning strings, that have been found during the execution of load(). SCHEMA_PATH: Path Absolute path to the JSON schema. + """ scene: raillabel.Scene - warnings: t.List[str] + warnings: list[str] SCHEMA_PATH: Path @abstractmethod @@ -47,6 +49,7 @@ def load(self, data: dict, validate: bool = True) -> raillabel.Scene: ------- scene: raillabel.Scene The loaded scene with the data. + """ raise NotImplementedError @@ -66,5 +69,6 @@ def supports(self, data: dict) -> bool: ------- bool: If True, the Loader class is suitable for the data. + """ raise NotImplementedError diff --git a/raillabel_providerkit/convert/loader_classes/loader_understand_ai.py b/raillabel_providerkit/convert/loader_classes/loader_understand_ai.py index eab5d38..84a5af6 100644 --- a/raillabel_providerkit/convert/loader_classes/loader_understand_ai.py +++ b/raillabel_providerkit/convert/loader_classes/loader_understand_ai.py @@ -1,15 +1,17 @@ # Copyright DB Netz AG and contributors # SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + import json -import typing as t from pathlib import Path import jsonschema import raillabel -from ..._util._warning import _WarningsLogger -from ...format import understand_ai as uai_format +from raillabel_providerkit._util._warning import _WarningsLogger +from raillabel_providerkit.format import understand_ai as uai_format + from ._loader_abc import LoaderABC @@ -22,10 +24,11 @@ class LoaderUnderstandAi(LoaderABC): Loaded raillabel.format.understand_ai.Scene with the data. 
warnings: t.List[str] List of warning strings, that have been found during the execution of load(). + """ scene: uai_format.Scene - warnings: t.List[str] + warnings: list[str] SCHEMA_PATH: Path = ( Path(__file__).parent.parent.parent / "format" / "understand_ai_t4_schema.json" ) @@ -47,8 +50,8 @@ def load(self, data: dict, validate_schema: bool = False) -> uai_format.Scene: ------- scene: raillabel.format.understand_ai.UAIScene The loaded scene with the data. - """ + """ if validate_schema: self.validate_schema(data) @@ -74,8 +77,8 @@ def supports(self, data: dict) -> bool: ------- bool: If True, the Loader class is suitable for the data. - """ + """ return ( "metadata" in data and "project_id" in data["metadata"] and "coordinateSystems" in data and "frames" in data ) - def validate_schema(self, data: dict) -> t.List[str]: + def validate_schema(self, data: dict) -> list[str]: """Check if the schema is correct.""" with self.SCHEMA_PATH.open() as file: schema = json.load(file) validator = jsonschema.Draft7Validator(schema=schema) - schema_errors = [] - - for error in validator.iter_errors(data): - schema_errors.append("$" + error.json_path[1:] + ": " + str(error.message)) - - return schema_errors + return [ + "$" + error.json_path[1:] + ": " + str(error.message) + for error in validator.iter_errors(data) + ] diff --git a/raillabel_providerkit/exceptions.py b/raillabel_providerkit/exceptions.py index 47ff8f7..eb94cb3 100644 --- a/raillabel_providerkit/exceptions.py +++ b/raillabel_providerkit/exceptions.py @@ -7,6 +7,21 @@ class UnsupportedFormatError(Exception): __module__ = "raillabel_providerkit" + def __init__(self) -> None: + super().__init__("No loader could be found that supports the provided data.") + + +class ValueDoesNotMatchTypeError(Exception): + """Raised when the expected type of a field does not match its value.""" + + __module__ = "raillabel_providerkit" + + def __init__(self, attribute_value_class: type) -> None: + super().__init__( + f"Type {attribute_value_class} does not correspond to a valid RailLabel attribute " + "type. Supported types are str, float, int, bool, list, tuple." + ) + class SchemaError(Exception): """Raised when the data does not validate against a given schema.""" diff --git a/raillabel_providerkit/format/__init__.py b/raillabel_providerkit/format/__init__.py index 834ba5c..86d1cfa 100644 --- a/raillabel_providerkit/format/__init__.py +++ b/raillabel_providerkit/format/__init__.py @@ -4,3 +4,5 @@ from . import understand_ai from . 
import understand_ai as uai + +__all__ = ["uai", "understand_ai"] diff --git a/raillabel_providerkit/format/understand_ai/__init__.py b/raillabel_providerkit/format/understand_ai/__init__.py index cbec19e..0e1e7aa 100644 --- a/raillabel_providerkit/format/understand_ai/__init__.py +++ b/raillabel_providerkit/format/understand_ai/__init__.py @@ -15,3 +15,19 @@ from .segmentation_3d import Segmentation3d from .sensor_reference import SensorReference from .size_3d import Size3d + +__all__ = [ + "BoundingBox2d", + "BoundingBox3d", + "CoordinateSystem", + "Frame", + "Metadata", + "Point3d", + "Polygon2d", + "Polyline2d", + "Quaternion", + "Scene", + "Segmentation3d", + "SensorReference", + "Size3d", +] diff --git a/raillabel_providerkit/format/understand_ai/_annotation.py b/raillabel_providerkit/format/understand_ai/_annotation.py index 22b400c..b8f2bbd 100644 --- a/raillabel_providerkit/format/understand_ai/_annotation.py +++ b/raillabel_providerkit/format/understand_ai/_annotation.py @@ -1,36 +1,32 @@ # Copyright DB Netz AG and contributors # SPDX-License-Identifier: Apache-2.0 -import typing as t -from abc import ABC, abstractmethod, abstractproperty +from __future__ import annotations + +from abc import ABC, abstractmethod from dataclasses import dataclass from uuid import UUID -from ..._util._attribute_type import AttributeType +from raillabel_providerkit._util._attribute_type import AttributeType + from ._translation import translate_class_id, translate_sensor_id from .sensor_reference import SensorReference @dataclass class _Annotation(ABC): - id: UUID object_id: UUID class_name: str attributes: dict sensor: SensorReference - @property - @abstractproperty - def OPENLABEL_ID(self) -> t.List[str]: - raise NotImplementedError - @classmethod @abstractmethod - def fromdict(cls, data_dict: t.Dict) -> t.Type["_Annotation"]: + def fromdict(cls, data_dict: dict) -> type[_Annotation]: raise NotImplementedError - def to_raillabel(self) -> t.Tuple[dict, str, str, dict]: + def to_raillabel(self) -> tuple[dict, str, str, dict]: """Convert to a raillabel compatible dict. Returns @@ -43,8 +39,8 @@ def to_raillabel(self) -> t.Tuple[dict, str, str, dict]: Friendly identifier of the class the annotated object belongs to. sensor_reference: dict Dictionary of the sensor reference. - """ + """ return ( { "name": str(self.id), @@ -58,11 +54,9 @@ def to_raillabel(self) -> t.Tuple[dict, str, str, dict]: ) def _attributes_to_raillabel(self) -> dict: - attributes = {} for attr_name, attr_value in self.attributes.items(): - attr_type = AttributeType.from_value(type(attr_value)).value if attr_type not in attributes: diff --git a/raillabel_providerkit/format/understand_ai/_translation.py b/raillabel_providerkit/format/understand_ai/_translation.py index bbbb688..7b30e40 100644 --- a/raillabel_providerkit/format/understand_ai/_translation.py +++ b/raillabel_providerkit/format/understand_ai/_translation.py @@ -17,6 +17,7 @@ def translate_sensor_id(original_sensor_id: str) -> str: ------- str Translated id or original_sensor_id, if no translation could be found. + """ return TRANSLATION["streams"].get(original_sensor_id, original_sensor_id) @@ -33,6 +34,7 @@ def translate_class_id(original_class_id: str) -> str: ------- str Translated id or original_class_id, if no translation could be found. + """ return TRANSLATION["classes"].get(original_class_id, original_class_id) @@ -49,6 +51,7 @@ def fetch_sensor_type(sensor_id: str) -> str: ------- str Sensor type or 'other' if sensor_id not found in translation.json. 
+ """ return TRANSLATION["stream_types"].get(sensor_id, "other") @@ -67,25 +70,21 @@ def fetch_sensor_resolutions(sensor_id: str) -> dict: Dictionary containing the resolution information. Key 'x' contains the width in pixels, key 'y' contains the height in pixels. If the sensor is a radar, 'resolution_px_per_m' is also included. + """ return TRANSLATION["stream_resolutions"].get( sensor_id, {"x": None, "y": None, "resolution_px_per_m": None} ) -def _load_translation(): +def _load_translation() -> None: """Load the translation file when the module is imported. This prevents it from beeing loaded for every annotation. """ - - global TRANSLATION - translatiion_path = Path(__file__).parent.parent.parent / "convert" / "translation.json" with translatiion_path.open() as translation_file: - TRANSLATION = json.load(translation_file) - + return json.load(translation_file) -TRANSLATION = {} -_load_translation() +TRANSLATION = _load_translation() diff --git a/raillabel_providerkit/format/understand_ai/bounding_box_2d.py b/raillabel_providerkit/format/understand_ai/bounding_box_2d.py index d66994c..2bb7269 100644 --- a/raillabel_providerkit/format/understand_ai/bounding_box_2d.py +++ b/raillabel_providerkit/format/understand_ai/bounding_box_2d.py @@ -1,7 +1,8 @@ # Copyright DB Netz AG and contributors # SPDX-License-Identifier: Apache-2.0 -import typing as t +from __future__ import annotations + from dataclasses import dataclass from uuid import UUID @@ -34,6 +35,7 @@ class BoundingBox2d(_Annotation): Right corner of the bounding box in pixels. y_max: float Bottom corner of the bounding box in pixels. + """ x_min: float @@ -44,7 +46,7 @@ class BoundingBox2d(_Annotation): OPENLABEL_ID = "bbox" @classmethod - def fromdict(cls, data_dict: t.Dict) -> "BoundingBox2d": + def fromdict(cls, data_dict: dict) -> BoundingBox2d: """Generate a BoundingBox2d from a dictionary in the UAI format. Parameters @@ -56,8 +58,8 @@ def fromdict(cls, data_dict: t.Dict) -> "BoundingBox2d": ------- BoundingBox2d Converted 2d bounding box. - """ + """ return BoundingBox2d( id=UUID(data_dict["id"]), object_id=UUID(data_dict["objectId"]), @@ -70,7 +72,7 @@ def fromdict(cls, data_dict: t.Dict) -> "BoundingBox2d": sensor=SensorReference.fromdict(data_dict["sensor"]), ) - def _val_to_raillabel(self) -> list: + def _val_to_raillabel(self) -> list[float]: return [ (self.x_max + self.x_min) / 2, (self.y_max + self.y_min) / 2, diff --git a/raillabel_providerkit/format/understand_ai/bounding_box_3d.py b/raillabel_providerkit/format/understand_ai/bounding_box_3d.py index 914e4b4..0573248 100644 --- a/raillabel_providerkit/format/understand_ai/bounding_box_3d.py +++ b/raillabel_providerkit/format/understand_ai/bounding_box_3d.py @@ -1,7 +1,8 @@ # Copyright DB Netz AG and contributors # SPDX-License-Identifier: Apache-2.0 -import typing as t +from __future__ import annotations + from dataclasses import dataclass from uuid import UUID @@ -35,6 +36,7 @@ class BoundingBox3d(_Annotation): 3d size of the bounding box. quaternion: raillabel.format.understand_ai.Quaternion Rotation quaternion of the bounding box. + """ center: Point3d @@ -44,7 +46,7 @@ class BoundingBox3d(_Annotation): OPENLABEL_ID = "cuboid" @classmethod - def fromdict(cls, data_dict: t.Dict) -> "BoundingBox3d": + def fromdict(cls, data_dict: dict) -> BoundingBox3d: """Generate a BoundingBox3d from a dictionary in the UAI format. Parameters @@ -56,8 +58,8 @@ def fromdict(cls, data_dict: t.Dict) -> "BoundingBox3d": ------- BoundingBox3d Converted 3d bounding box. 
- """ + """ return BoundingBox3d( id=UUID(data_dict["id"]), object_id=UUID(data_dict["objectId"]), @@ -69,7 +71,7 @@ def fromdict(cls, data_dict: t.Dict) -> "BoundingBox3d": sensor=SensorReference.fromdict(data_dict["sensor"]), ) - def _val_to_raillabel(self) -> list: + def _val_to_raillabel(self) -> list[float]: return [ float(self.center.x), float(self.center.y), diff --git a/raillabel_providerkit/format/understand_ai/coordinate_system.py b/raillabel_providerkit/format/understand_ai/coordinate_system.py index eae9f3b..f0f39b2 100644 --- a/raillabel_providerkit/format/understand_ai/coordinate_system.py +++ b/raillabel_providerkit/format/understand_ai/coordinate_system.py @@ -1,7 +1,8 @@ # Copyright DB Netz AG and contributors # SPDX-License-Identifier: Apache-2.0 -import typing as t +from __future__ import annotations + from dataclasses import dataclass from ._translation import fetch_sensor_resolutions, fetch_sensor_type, translate_sensor_id @@ -35,19 +36,20 @@ class CoordinateSystem: dist_coeffs: list of float, optional Distortion coefficients of the sensor. Only applies to sensors of type camera. Default is None. + """ uid: str topic: str frame_id: str - position: t.List[float] - rotation_quaternion: t.List[float] - rotation_matrix: t.List[float] - angle_axis_rotation: t.List[float] - homogeneous_transform: t.Optional[t.List[float]] = None - measured_position: t.Optional[t.List[float]] = None - camera_matrix: t.Optional[t.List[float]] = None - dist_coeffs: t.Optional[t.List[float]] = None + position: list[float] + rotation_quaternion: list[float] + rotation_matrix: list[float] + angle_axis_rotation: list[float] + homogeneous_transform: list[float] | None = None + measured_position: list[float] | None = None + camera_matrix: list[float] | None = None + dist_coeffs: list[float] | None = None @property def translated_uid(self) -> str: @@ -55,7 +57,7 @@ def translated_uid(self) -> str: return translate_sensor_id(self.uid) @classmethod - def fromdict(cls, data_dict: dict) -> "CoordinateSystem": + def fromdict(cls, data_dict: dict) -> CoordinateSystem: """Generate a CoordinateSystem from a dictionary in the UAI format. Parameters @@ -67,8 +69,8 @@ def fromdict(cls, data_dict: dict) -> "CoordinateSystem": ------- coordinate_system: CoordinateSystem Converted coordinate_system. - """ + """ return CoordinateSystem( uid=data_dict["coordinate_system_id"], topic=data_dict["topic"], @@ -83,7 +85,7 @@ def fromdict(cls, data_dict: dict) -> "CoordinateSystem": dist_coeffs=data_dict.get("dist_coeffs"), ) - def to_raillabel(self) -> t.Tuple[dict, dict]: + def to_raillabel(self) -> tuple[dict, dict]: """Convert to a raillabel compatible dict. Returns @@ -92,8 +94,8 @@ def to_raillabel(self) -> t.Tuple[dict, dict]: Dictionary of the raillabel coordinate system. stream_dict: dict Dictionary of the raillabel stream. 
- """ + """ stream_dict = { "type": "sensor", "parent": "base", @@ -116,9 +118,8 @@ def to_raillabel(self) -> t.Tuple[dict, dict]: return stream_dict, coordinate_system_dict - def _stream_properties_to_raillabel(self, type: str) -> t.Optional[dict]: - - if type == "camera": + def _stream_properties_to_raillabel(self, sensor_type: str) -> dict | None: + if sensor_type == "camera": return { "intrinsics_pinhole": { "camera_matrix": self._convert_camera_matrix(self.camera_matrix[:]), @@ -128,7 +129,7 @@ def _stream_properties_to_raillabel(self, type: str) -> t.Optional[dict]: } } - elif type == "radar": + if sensor_type == "radar": return { "intrinsics_radar": { "resolution_px_per_m": fetch_sensor_resolutions(self.translated_uid)[ @@ -139,11 +140,9 @@ def _stream_properties_to_raillabel(self, type: str) -> t.Optional[dict]: } } - else: - return None + return None def _convert_camera_matrix(self, camera_matrix: list) -> list: - camera_matrix.insert(9, 0) camera_matrix.insert(6, 0) camera_matrix.insert(3, 0) diff --git a/raillabel_providerkit/format/understand_ai/frame.py b/raillabel_providerkit/format/understand_ai/frame.py index 172c162..d82959b 100644 --- a/raillabel_providerkit/format/understand_ai/frame.py +++ b/raillabel_providerkit/format/understand_ai/frame.py @@ -1,12 +1,14 @@ # Copyright DB Netz AG and contributors # SPDX-License-Identifier: Apache-2.0 -import typing as t +from __future__ import annotations + import uuid from dataclasses import dataclass from decimal import Decimal -from ..._util._warning import _warning +from raillabel_providerkit._util._warning import _warning + from ._annotation import _Annotation from ._translation import translate_class_id, translate_sensor_id from .bounding_box_2d import BoundingBox2d @@ -30,17 +32,18 @@ class Frame: Dictionary containing all annotations. The keys are the uids of the annotations and the values are objects of type BoundingBox2d, BoundingBox3d, Polygon2d, Polyline2d or Segementation3d. + """ id: int timestamp: Decimal - bounding_box_2ds: t.Dict[str, BoundingBox2d] - bounding_box_3ds: t.Dict[str, BoundingBox3d] - polygon_2ds: t.Dict[str, Polygon2d] - polyline_2ds: t.Dict[str, Polyline2d] - segmentation_3ds: t.Dict[str, Segmentation3d] + bounding_box_2ds: dict[str, BoundingBox2d] + bounding_box_3ds: dict[str, BoundingBox3d] + polygon_2ds: dict[str, Polygon2d] + polyline_2ds: dict[str, Polyline2d] + segmentation_3ds: dict[str, Segmentation3d] - _annotation_uids: t.Set[str] = None + _annotation_uids: set[str] = None @property def annotations(self) -> dict: @@ -62,6 +65,7 @@ def translated_objects(self) -> dict: dict Dictionary containing all objects. Keys are the object IDs and values are the translated class names. + """ return { str(a.object_id): translate_class_id(a.class_name) for a in self.annotations.values() @@ -76,6 +80,7 @@ def translated_sensors(self) -> dict: dict Dictionary containing all sensors. Keys are the translated sensor IDs and values are the SensorReference objects. + """ sensors_list = [] @@ -86,7 +91,7 @@ def translated_sensors(self) -> dict: return {sensor.type: sensor for sensor in sensors_list} @classmethod - def fromdict(cls, data_dict: dict) -> "Frame": + def fromdict(cls, data_dict: dict) -> Frame: """Generate a Frame from a dictionary in the UAI format. Parameters @@ -98,8 +103,8 @@ def fromdict(cls, data_dict: dict) -> "Frame": ------- Frame Converted frame. - """ + """ cls._annotation_uids = set() return Frame( @@ -132,6 +137,7 @@ def to_raillabel(self) -> dict: ------- Frame Converted frame. 
+ """ return { "frame_properties": self._frame_properties_to_raillabel(), @@ -140,9 +146,8 @@ def to_raillabel(self) -> dict: @classmethod def _annotation_fromdict( - cls, data_dict: dict, annotation_class: t.Type[_Annotation] - ) -> t.Dict[str, t.Type[_Annotation]]: - + cls, data_dict: dict, annotation_class: type[_Annotation] + ) -> dict[str, type[_Annotation]]: annotations = {} for annotation_dict in data_dict: annotation_dict["id"] = cls._check_duplicate_annotation_uid(annotation_dict["id"]) @@ -152,7 +157,6 @@ def _annotation_fromdict( @classmethod def _check_duplicate_annotation_uid(cls, uid: str) -> str: - if uid in cls._annotation_uids: _warning( f"Annotation uid {uid} is contained more than once. A new uid will be assigned." @@ -163,7 +167,6 @@ def _check_duplicate_annotation_uid(cls, uid: str) -> str: return uid def _frame_properties_to_raillabel(self) -> dict: - streams_dict = {} for stream_id, stream in self.translated_sensors.items(): streams_dict[stream_id] = { @@ -182,7 +185,6 @@ def _objects_to_raillabel(self) -> dict: object_data = {} for annotation in self.annotations.values(): - object_id = str(annotation.object_id) if object_id not in object_data: diff --git a/raillabel_providerkit/format/understand_ai/metadata.py b/raillabel_providerkit/format/understand_ai/metadata.py index 59b954e..69f2507 100644 --- a/raillabel_providerkit/format/understand_ai/metadata.py +++ b/raillabel_providerkit/format/understand_ai/metadata.py @@ -26,6 +26,7 @@ class Metadata: coordinate_system_reference: str folder_name: str Directory with the exported reference data (e.g. images, point clouds). + """ clip_id: str @@ -50,8 +51,8 @@ def fromdict(cls, data_dict: dict) -> "Metadata": ------- metadata: Metadata Converted metadata. - """ + """ return Metadata( clip_id=data_dict["clip_id"], external_clip_id=data_dict["external_clip_id"], @@ -70,8 +71,8 @@ def to_raillabel(self) -> dict: ------- metadata: dict Converted metadata. - """ + """ return { "name": self.external_clip_id, "schema_version": "1.0.0", @@ -81,11 +82,9 @@ def to_raillabel(self) -> dict: } def _get_subschema_version(self) -> str: - RAILLABEL_SCHEMA_PATH = ( + raillabel_schema_path = ( Path(__file__).parent.parent.parent / "format" / "raillabel_schema.json" ) - with RAILLABEL_SCHEMA_PATH.open() as schema_file: - subschema_version = json.load(schema_file)["version"] - - return subschema_version + with raillabel_schema_path.open() as schema_file: + return json.load(schema_file)["version"] diff --git a/raillabel_providerkit/format/understand_ai/point_3d.py b/raillabel_providerkit/format/understand_ai/point_3d.py index 2fe0817..1ffc87e 100644 --- a/raillabel_providerkit/format/understand_ai/point_3d.py +++ b/raillabel_providerkit/format/understand_ai/point_3d.py @@ -16,6 +16,7 @@ class Point3d: Position of the object in the y-dimension. z: float Position of the object in the z-dimension. + """ x: float @@ -35,8 +36,8 @@ def fromdict(cls, data_dict: dict) -> "Point3d": ------- Point3d Converted 3d point. 
- """ + """ return Point3d( x=float(data_dict["x"]), y=float(data_dict["y"]), diff --git a/raillabel_providerkit/format/understand_ai/polygon_2d.py b/raillabel_providerkit/format/understand_ai/polygon_2d.py index e8128ff..1027e3a 100644 --- a/raillabel_providerkit/format/understand_ai/polygon_2d.py +++ b/raillabel_providerkit/format/understand_ai/polygon_2d.py @@ -1,7 +1,8 @@ # Copyright DB Netz AG and contributors # SPDX-License-Identifier: Apache-2.0 -import typing as t +from __future__ import annotations + from dataclasses import dataclass from uuid import UUID @@ -28,14 +29,15 @@ class Polygon2d(_Annotation): Information about the sensor this annotation is labeled in. points: list[tuple[float, float]] 2d points belonging to the polygon. + """ - points: t.List[t.Tuple[float, float]] + points: list[tuple[float, float]] OPENLABEL_ID = "poly2d" @classmethod - def fromdict(cls, data_dict: t.Dict) -> "Polygon2d": + def fromdict(cls, data_dict: dict) -> Polygon2d: """Generate a Polygon2d from a dictionary in the UAI format. Parameters @@ -47,8 +49,8 @@ def fromdict(cls, data_dict: t.Dict) -> "Polygon2d": ------- Polygon2d Converted 2d polygon. - """ + """ return Polygon2d( id=UUID(data_dict["id"]), object_id=UUID(data_dict["objectId"]), @@ -58,7 +60,7 @@ def fromdict(cls, data_dict: t.Dict) -> "Polygon2d": points=[(p[0], p[1]) for p in data_dict["geometry"]["points"]], ) - def to_raillabel(self) -> t.Tuple[dict, str, str, dict]: + def to_raillabel(self) -> tuple[dict, str, str, dict]: """Convert to a raillabel compatible dict. Returns @@ -71,13 +73,13 @@ def to_raillabel(self) -> t.Tuple[dict, str, str, dict]: Friendly identifier of the class the annotated object belongs to. sensor_reference: dict Dictionary of the sensor reference. - """ + """ polygon = super().to_raillabel() polygon[0]["closed"] = True polygon[0]["mode"] = "MODE_POLY2D_ABSOLUTE" return polygon - def _val_to_raillabel(self) -> t.List[float]: + def _val_to_raillabel(self) -> list[float]: return [coordinate for point in self.points for coordinate in point] diff --git a/raillabel_providerkit/format/understand_ai/polyline_2d.py b/raillabel_providerkit/format/understand_ai/polyline_2d.py index 15d8af6..3e753af 100644 --- a/raillabel_providerkit/format/understand_ai/polyline_2d.py +++ b/raillabel_providerkit/format/understand_ai/polyline_2d.py @@ -1,7 +1,8 @@ # Copyright DB Netz AG and contributors # SPDX-License-Identifier: Apache-2.0 -import typing as t +from __future__ import annotations + from dataclasses import dataclass from uuid import UUID @@ -28,14 +29,15 @@ class Polyline2d(_Annotation): Information about the sensor this annotation is labeled in. points: list[tuple[float, float]] 2d points belonging to the polyline. + """ - points: t.List[t.Tuple[float, float]] + points: list[tuple[float, float]] OPENLABEL_ID = "poly2d" @classmethod - def fromdict(cls, data_dict: t.Dict) -> "Polyline2d": + def fromdict(cls, data_dict: dict) -> Polyline2d: """Generate a Polyline2d from a dictionary in the UAI format. Parameters @@ -47,8 +49,8 @@ def fromdict(cls, data_dict: t.Dict) -> "Polyline2d": ------- Polyline2d Converted 2d polyline. - """ + """ return Polyline2d( id=UUID(data_dict["id"]), object_id=UUID(data_dict["objectId"]), @@ -58,7 +60,7 @@ def fromdict(cls, data_dict: t.Dict) -> "Polyline2d": points=[(p[0], p[1]) for p in data_dict["geometry"]["points"]], ) - def to_raillabel(self) -> t.Tuple[dict, str, str, dict]: + def to_raillabel(self) -> tuple[dict, str, str, dict]: """Convert to a raillabel compatible dict. 
Returns @@ -71,13 +73,13 @@ def to_raillabel(self) -> t.Tuple[dict, str, str, dict]: Friendly identifier of the class the annotated object belongs to. sensor_reference: dict Dictionary of the sensor reference. - """ + """ polyline = super().to_raillabel() polyline[0]["closed"] = False polyline[0]["mode"] = "MODE_POLY2D_ABSOLUTE" return polyline - def _val_to_raillabel(self) -> t.List[float]: + def _val_to_raillabel(self) -> list[float]: return [coordinate for point in self.points for coordinate in point] diff --git a/raillabel_providerkit/format/understand_ai/quaternion.py b/raillabel_providerkit/format/understand_ai/quaternion.py index 8b68a89..52a5299 100644 --- a/raillabel_providerkit/format/understand_ai/quaternion.py +++ b/raillabel_providerkit/format/understand_ai/quaternion.py @@ -18,6 +18,7 @@ class Quaternion: The z component of the quaternion. w: float The w component of the quaternion. + """ x: float @@ -38,8 +39,8 @@ def fromdict(cls, data_dict: dict) -> "Quaternion": ------- Quaternion Converted quaternion. - """ + """ return Quaternion( x=float(data_dict["x"]), y=float(data_dict["y"]), diff --git a/raillabel_providerkit/format/understand_ai/scene.py b/raillabel_providerkit/format/understand_ai/scene.py index 302f920..9d72472 100644 --- a/raillabel_providerkit/format/understand_ai/scene.py +++ b/raillabel_providerkit/format/understand_ai/scene.py @@ -1,10 +1,12 @@ # Copyright DB Netz AG and contributors # SPDX-License-Identifier: Apache-2.0 -import typing as t +from __future__ import annotations + from dataclasses import dataclass -from ..._util._warning import _warning +from raillabel_providerkit._util._warning import _warning + from .coordinate_system import CoordinateSystem from .frame import Frame from .metadata import Metadata @@ -21,14 +23,15 @@ class Scene: coordinate_systems: dict[str, raillabel.format.understand_ai.CoordinateSystem] Global information for sensors regarding calibration. frames: dict[int, raillabel.format.understand_ai.Frame] + """ metadata: Metadata - coordinate_systems: t.Dict[str, CoordinateSystem] - frames: t.Dict[int, Frame] + coordinate_systems: dict[str, CoordinateSystem] + frames: dict[int, Frame] @classmethod - def fromdict(cls, data_dict: dict) -> "Scene": + def fromdict(cls, data_dict: dict) -> Scene: """Generate a Scene from a dictionary in the UAI format. Parameters @@ -40,8 +43,8 @@ def fromdict(cls, data_dict: dict) -> "Scene": ------- Scene Converted scene. - """ + """ return Scene( metadata=Metadata.fromdict(data_dict["metadata"]), coordinate_systems=cls._coordinate_systems_fromdict(data_dict["coordinateSystems"]), @@ -55,6 +58,7 @@ def to_raillabel(self) -> dict: ------- dict: Dictionary of the raillabel scene. 
+ """ return { "openlabel": { @@ -67,7 +71,7 @@ def to_raillabel(self) -> dict: } @classmethod - def _coordinate_systems_fromdict(cls, data_dict: t.List[dict]) -> t.Dict[str, CoordinateSystem]: + def _coordinate_systems_fromdict(cls, data_dict: list[dict]) -> dict[str, CoordinateSystem]: coordinate_systems = {} for cs in data_dict: coordinate_systems[cs["coordinate_system_id"]] = CoordinateSystem.fromdict(cs) @@ -75,7 +79,7 @@ def _coordinate_systems_fromdict(cls, data_dict: t.List[dict]) -> t.Dict[str, Co return coordinate_systems @classmethod - def _frames_fromdict(cls, data_dict: t.List[dict]) -> t.Dict[int, Frame]: + def _frames_fromdict(cls, data_dict: list[dict]) -> dict[int, Frame]: frames = {} for frame in data_dict: frame_id = int(frame["frameId"]) @@ -83,7 +87,7 @@ def _frames_fromdict(cls, data_dict: t.List[dict]) -> t.Dict[int, Frame]: if frame_id in frames: _warning( f"Frame UID {frame_id} is contained more than once in the scene. " - + "The duplicate frame will be omitted." + "The duplicate frame will be omitted." ) continue @@ -108,13 +112,11 @@ def _coordinate_systems_to_raillabel(self) -> dict: return coordinate_systems def _objects_to_raillabel(self) -> dict: - object_dicts = self._collect_all_translated_objects() object_name_counter = {} objects = {} for object_id, object_class in object_dicts.items(): - if object_class not in object_name_counter: object_name_counter[object_class] = 0 diff --git a/raillabel_providerkit/format/understand_ai/segmentation_3d.py b/raillabel_providerkit/format/understand_ai/segmentation_3d.py index a4f0e74..9bb54e9 100644 --- a/raillabel_providerkit/format/understand_ai/segmentation_3d.py +++ b/raillabel_providerkit/format/understand_ai/segmentation_3d.py @@ -1,7 +1,8 @@ # Copyright DB Netz AG and contributors # SPDX-License-Identifier: Apache-2.0 -import typing as t +from __future__ import annotations + from dataclasses import dataclass from uuid import UUID @@ -30,15 +31,16 @@ class Segmentation3d(_Annotation): List of point indices of the lidar pointcloud. number_of_points: int Total number of points in the associated_points. + """ - associated_points: t.List[int] + associated_points: list[int] number_of_points: int OPENLABEL_ID = "vec" @classmethod - def fromdict(cls, data_dict: t.Dict) -> "Segmentation3d": + def fromdict(cls, data_dict: dict) -> Segmentation3d: """Generate a Segmentation3d from a dictionary in the UAI format. Parameters @@ -50,8 +52,8 @@ def fromdict(cls, data_dict: t.Dict) -> "Segmentation3d": ------- Segmentation3d Converted 3d segmentation. - """ + """ return Segmentation3d( id=UUID(data_dict["id"]), object_id=UUID(data_dict["objectId"]), diff --git a/raillabel_providerkit/format/understand_ai/sensor_reference.py b/raillabel_providerkit/format/understand_ai/sensor_reference.py index e6f93e5..8517509 100644 --- a/raillabel_providerkit/format/understand_ai/sensor_reference.py +++ b/raillabel_providerkit/format/understand_ai/sensor_reference.py @@ -1,7 +1,8 @@ # Copyright DB Netz AG and contributors # SPDX-License-Identifier: Apache-2.0 -import typing as t +from __future__ import annotations + from dataclasses import dataclass from decimal import Decimal @@ -18,6 +19,7 @@ class SensorReference: URI to the file containing the frame specific sensor output from the project directory. timestamp: decimal.Decimal Unix timestamp of the sensor recording. 
+ """ type: str @@ -25,7 +27,7 @@ class SensorReference: timestamp: Decimal @classmethod - def fromdict(cls, data_dict: dict) -> "SensorReference": + def fromdict(cls, data_dict: dict) -> SensorReference: """Generate a SensorReference from a dictionary in the UAI format. Parameters @@ -37,13 +39,13 @@ def fromdict(cls, data_dict: dict) -> "SensorReference": ------- SensorReference Converted sensor reference. - """ + """ return SensorReference( type=data_dict["type"], uri=data_dict["uri"], timestamp=Decimal(data_dict["timestamp"]) ) - def to_raillabel(self) -> t.Tuple[str, dict]: + def to_raillabel(self) -> tuple[str, dict]: """Convert to a raillabel compatible dict. Returns @@ -52,8 +54,8 @@ def to_raillabel(self) -> t.Tuple[str, dict]: Friendly identifier of the sensor. sensor_reference: dict Dictionary valid for the raillabel schema. - """ + """ return ( self.type, { diff --git a/raillabel_providerkit/format/understand_ai/size_3d.py b/raillabel_providerkit/format/understand_ai/size_3d.py index 726599a..bd7fade 100644 --- a/raillabel_providerkit/format/understand_ai/size_3d.py +++ b/raillabel_providerkit/format/understand_ai/size_3d.py @@ -16,6 +16,7 @@ class Size3d: Size of the object in the y-dimension. height: float Size of the object in the z-dimension. + """ width: float @@ -35,8 +36,8 @@ def fromdict(cls, data_dict: dict) -> "Size3d": ------- Size3d Converted 3d size. - """ + """ return Size3d( width=float(data_dict["width"]), length=float(data_dict["length"]), diff --git a/raillabel_providerkit/validation/__init__.py b/raillabel_providerkit/validation/__init__.py index 098b319..a3ba7ba 100644 --- a/raillabel_providerkit/validation/__init__.py +++ b/raillabel_providerkit/validation/__init__.py @@ -3,3 +3,5 @@ """Package for validating raillabel data regarding the format requirements.""" from .validate_onthology.validate_onthology import validate_onthology + +__all__ = ["validate_onthology"] diff --git a/raillabel_providerkit/validation/validate.py b/raillabel_providerkit/validation/validate.py index a3dddde..e4f1403 100644 --- a/raillabel_providerkit/validation/validate.py +++ b/raillabel_providerkit/validation/validate.py @@ -1,7 +1,8 @@ # Copyright DB Netz AG and contributors # SPDX-License-Identifier: Apache-2.0 -import typing as t +from __future__ import annotations + from pathlib import Path import raillabel @@ -9,7 +10,7 @@ from . import validate_onthology -def validate(scene: raillabel.Scene, onthology: t.Union[dict, Path]) -> t.List[str]: +def validate(scene: raillabel.Scene, onthology: dict | Path) -> list[str]: """Validate a scene based on the Deutsche Bahn Requirements. Parameters @@ -26,8 +27,8 @@ def validate(scene: raillabel.Scene, onthology: t.Union[dict, Path]) -> t.List[s list[str] list of all requirement errors in the scene. If an empty list is returned, then there are no errors present and the scene is valid. 
- """ + """ errors = [] errors += validate_onthology(scene, onthology) diff --git a/raillabel_providerkit/validation/validate_empty_frames/validate_empty_frames.py b/raillabel_providerkit/validation/validate_empty_frames/validate_empty_frames.py index 1bdc2e9..129bb7f 100644 --- a/raillabel_providerkit/validation/validate_empty_frames/validate_empty_frames.py +++ b/raillabel_providerkit/validation/validate_empty_frames/validate_empty_frames.py @@ -1,12 +1,12 @@ # Copyright DB Netz AG and contributors # SPDX-License-Identifier: Apache-2.0 -from typing import List +from __future__ import annotations import raillabel -def validate_empty_frames(scene: raillabel.Scene) -> List[str]: +def validate_empty_frames(scene: raillabel.Scene) -> list[str]: """Validate whether all frames of a scene have at least one annotation. Parameters @@ -19,8 +19,9 @@ def validate_empty_frames(scene: raillabel.Scene) -> List[str]: list[str] list of all empty frame errors in the scene. If an empty list is returned, then there are no errors present. + """ - errors: List[str] = [] + errors: list[str] = [] for frame_uid, frame in scene.frames.items(): if _is_frame_empty(frame): diff --git a/raillabel_providerkit/validation/validate_onthology/_onthology_classes/_attributes/_attribute_abc.py b/raillabel_providerkit/validation/validate_onthology/_onthology_classes/_attributes/_attribute_abc.py index 04e45d6..a545507 100644 --- a/raillabel_providerkit/validation/validate_onthology/_onthology_classes/_attributes/_attribute_abc.py +++ b/raillabel_providerkit/validation/validate_onthology/_onthology_classes/_attributes/_attribute_abc.py @@ -1,8 +1,8 @@ # Copyright DB Netz AG and contributors # SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations import abc -import typing as t from dataclasses import dataclass from importlib import import_module from inspect import isclass @@ -19,27 +19,25 @@ def supports(cls, data_dict: dict) -> bool: @classmethod @abc.abstractmethod - def fromdict(cls, data_dict: dict) -> t.Type["_Attribute"]: + def fromdict(cls, data_dict: dict) -> type[_Attribute]: raise NotImplementedError @abc.abstractmethod - def check(self, attribute_name: str, attribute_value, annotation_id: str) -> t.List[str]: + def check( + self, attribute_name: str, attribute_value: bool | float | str | list, annotation_id: str + ) -> list[str]: raise NotImplementedError -def attribute_classes() -> t.List[t.Type[_Attribute]]: +def attribute_classes() -> list[type[_Attribute]]: """Return dictionary with Attribute child classes.""" return ATTRIBUTE_CLASSES -def _collect_attribute_classes(): +def _collect_attribute_classes() -> None: """Collect attribute child classes and store them.""" - - global ATTRIBUTE_CLASSES - package_dir = str(Path(__file__).resolve().parent) for _, module_name, _ in iter_modules([package_dir]): - module = import_module( f"raillabel_providerkit.validation.validate_onthology._onthology_classes._attributes.{module_name}" ) diff --git a/raillabel_providerkit/validation/validate_onthology/_onthology_classes/_attributes/_boolean_attribute.py b/raillabel_providerkit/validation/validate_onthology/_onthology_classes/_attributes/_boolean_attribute.py index 0a9ead5..ca92b0b 100644 --- a/raillabel_providerkit/validation/validate_onthology/_onthology_classes/_attributes/_boolean_attribute.py +++ b/raillabel_providerkit/validation/validate_onthology/_onthology_classes/_attributes/_boolean_attribute.py @@ -1,7 +1,8 @@ # Copyright DB Netz AG and contributors # SPDX-License-Identifier: Apache-2.0 -import 
typing as t +from __future__ import annotations + from dataclasses import dataclass from ._attribute_abc import _Attribute @@ -10,20 +11,22 @@ @dataclass class _BooleanAttribute(_Attribute): @classmethod - def supports(cls, data_dict: dict): + def supports(cls, data_dict: dict) -> bool: return data_dict == "boolean" @classmethod - def fromdict(cls, data_dict: dict): + def fromdict(cls, _: dict) -> _BooleanAttribute: return _BooleanAttribute() - def check(self, attribute_name: str, attribute_value, annotation_id: str) -> t.List[str]: + def check( + self, attribute_name: str, attribute_value: bool | float | str | list, annotation_id: str + ) -> list[str]: errors = [] - if type(attribute_value) != bool: + if type(attribute_value) is not bool: errors.append( f"Attribute '{attribute_name}' of annotation {annotation_id} is of type " - + f"'{attribute_value.__class__.__name__}' (should be 'bool')." + f"'{attribute_value.__class__.__name__}' (should be 'bool')." ) return errors diff --git a/raillabel_providerkit/validation/validate_onthology/_onthology_classes/_attributes/_integer_attribute.py b/raillabel_providerkit/validation/validate_onthology/_onthology_classes/_attributes/_integer_attribute.py index 19931f0..f838f56 100644 --- a/raillabel_providerkit/validation/validate_onthology/_onthology_classes/_attributes/_integer_attribute.py +++ b/raillabel_providerkit/validation/validate_onthology/_onthology_classes/_attributes/_integer_attribute.py @@ -1,7 +1,8 @@ # Copyright DB Netz AG and contributors # SPDX-License-Identifier: Apache-2.0 -import typing as t +from __future__ import annotations + from dataclasses import dataclass from ._attribute_abc import _Attribute @@ -10,20 +11,22 @@ @dataclass class _IntegerAttribute(_Attribute): @classmethod - def supports(cls, data_dict: dict): + def supports(cls, data_dict: dict) -> bool: return data_dict == "integer" @classmethod - def fromdict(cls, data_dict: dict): + def fromdict(cls, _: dict) -> _IntegerAttribute: return _IntegerAttribute() - def check(self, attribute_name: str, attribute_value, annotation_id: str) -> t.List[str]: + def check( + self, attribute_name: str, attribute_value: bool | float | str | list, annotation_id: str + ) -> list[str]: errors = [] - if type(attribute_value) != int: + if type(attribute_value) is not int: errors.append( f"Attribute '{attribute_name}' of annotation {annotation_id} is of type " - + f"'{attribute_value.__class__.__name__}' (should be 'int')." + f"'{attribute_value.__class__.__name__}' (should be 'int')." 
) return errors diff --git a/raillabel_providerkit/validation/validate_onthology/_onthology_classes/_attributes/_multi_select_attribute.py b/raillabel_providerkit/validation/validate_onthology/_onthology_classes/_attributes/_multi_select_attribute.py index 5805194..7f6e8ac 100644 --- a/raillabel_providerkit/validation/validate_onthology/_onthology_classes/_attributes/_multi_select_attribute.py +++ b/raillabel_providerkit/validation/validate_onthology/_onthology_classes/_attributes/_multi_select_attribute.py @@ -1,7 +1,8 @@ # Copyright DB Netz AG and contributors # SPDX-License-Identifier: Apache-2.0 -import typing as t +from __future__ import annotations + from dataclasses import dataclass from ._attribute_abc import _Attribute @@ -9,32 +10,32 @@ @dataclass class _MultiSelectAttribute(_Attribute): - - options: t.Set[str] + options: set[str] @classmethod - def supports(cls, data_dict: dict): + def supports(cls, data_dict: dict) -> bool: return ( - type(data_dict) == dict and "type" in data_dict and data_dict["type"] == "multi-select" + type(data_dict) is dict and "type" in data_dict and data_dict["type"] == "multi-select" ) @classmethod - def fromdict(cls, data_dict: dict): + def fromdict(cls, data_dict: dict) -> _MultiSelectAttribute: return _MultiSelectAttribute(options=set(data_dict["options"])) - def check(self, attribute_name: str, attribute_values, annotation_id: str) -> t.List[str]: - - if type(attribute_values) != list: + def check( + self, attribute_name: str, attribute_values: bool | float | str | list, annotation_id: str + ) -> list[str]: + if type(attribute_values) is not list: return [ f"Attribute '{attribute_name}' of annotation {annotation_id} is of type " - + f"'{attribute_values.__class__.__name__}' (should be 'list')." + f"'{attribute_values.__class__.__name__}' (should be 'list')." ] for attribute_value in attribute_values: if attribute_value not in self.options: return [ f"Attribute '{attribute_name}' of annotation {annotation_id} has an undefined " - + f"value '{attribute_value}' (defined options: {self._stringify_options()})." + f"value '{attribute_value}' (defined options: {self._stringify_options()})." ] return [] @@ -42,7 +43,7 @@ def check(self, attribute_name: str, attribute_values, annotation_id: str) -> t. 
def _stringify_options(self) -> str: options_str = "" - for option in sorted(list(self.options)): + for option in sorted(self.options): options_str += f"'{option}', " if options_str != "": diff --git a/raillabel_providerkit/validation/validate_onthology/_onthology_classes/_attributes/_single_select_attribute.py b/raillabel_providerkit/validation/validate_onthology/_onthology_classes/_attributes/_single_select_attribute.py index 7ce4309..8f1ea8f 100644 --- a/raillabel_providerkit/validation/validate_onthology/_onthology_classes/_attributes/_single_select_attribute.py +++ b/raillabel_providerkit/validation/validate_onthology/_onthology_classes/_attributes/_single_select_attribute.py @@ -1,7 +1,8 @@ # Copyright DB Netz AG and contributors # SPDX-License-Identifier: Apache-2.0 -import typing as t +from __future__ import annotations + from dataclasses import dataclass from ._attribute_abc import _Attribute @@ -9,31 +10,31 @@ @dataclass class _SingleSelectAttribute(_Attribute): - - options: t.Set[str] + options: set[str] @classmethod - def supports(cls, data_dict: dict): + def supports(cls, data_dict: dict) -> bool: return ( - type(data_dict) == dict and "type" in data_dict and data_dict["type"] == "single-select" + type(data_dict) is dict and "type" in data_dict and data_dict["type"] == "single-select" ) @classmethod - def fromdict(cls, data_dict: dict): + def fromdict(cls, data_dict: dict) -> _SingleSelectAttribute: return _SingleSelectAttribute(options=set(data_dict["options"])) - def check(self, attribute_name: str, attribute_value, annotation_id: str) -> t.List[str]: - - if type(attribute_value) != str: + def check( + self, attribute_name: str, attribute_value: bool | float | str | list, annotation_id: str + ) -> list[str]: + if type(attribute_value) is not str: return [ f"Attribute '{attribute_name}' of annotation {annotation_id} is of type " - + f"'{attribute_value.__class__.__name__}' (should be 'str')." + f"'{attribute_value.__class__.__name__}' (should be 'str')." ] if attribute_value not in self.options: return [ f"Attribute '{attribute_name}' of annotation {annotation_id} has an undefined " - + f"value '{attribute_value}' (defined options: {self._stringify_options()})." + f"value '{attribute_value}' (defined options: {self._stringify_options()})." 
] return [] @@ -41,7 +42,7 @@ def check(self, attribute_name: str, attribute_value, annotation_id: str) -> t.L def _stringify_options(self) -> str: options_str = "" - for option in sorted(list(self.options)): + for option in sorted(self.options): options_str += f"'{option}', " if options_str != "": diff --git a/raillabel_providerkit/validation/validate_onthology/_onthology_classes/_attributes/_string_attribute.py b/raillabel_providerkit/validation/validate_onthology/_onthology_classes/_attributes/_string_attribute.py index 1aef07f..bbbd30f 100644 --- a/raillabel_providerkit/validation/validate_onthology/_onthology_classes/_attributes/_string_attribute.py +++ b/raillabel_providerkit/validation/validate_onthology/_onthology_classes/_attributes/_string_attribute.py @@ -1,7 +1,8 @@ # Copyright DB Netz AG and contributors # SPDX-License-Identifier: Apache-2.0 -import typing as t +from __future__ import annotations + from dataclasses import dataclass from ._attribute_abc import _Attribute @@ -10,20 +11,22 @@ @dataclass class _StringAttribute(_Attribute): @classmethod - def supports(cls, data_dict: dict): + def supports(cls, data_dict: dict) -> bool: return data_dict == "string" @classmethod - def fromdict(cls, data_dict: dict): + def fromdict(cls, _: dict) -> _StringAttribute: return _StringAttribute() - def check(self, attribute_name: str, attribute_value, annotation_id: str) -> t.List[str]: + def check( + self, attribute_name: str, attribute_value: bool | float | str | list, annotation_id: str + ) -> list[str]: errors = [] - if type(attribute_value) != str: + if type(attribute_value) is not str: errors.append( f"Attribute '{attribute_name}' of annotation {annotation_id} is of type " - + f"'{attribute_value.__class__.__name__}' (should be 'str')." + f"'{attribute_value.__class__.__name__}' (should be 'str')." ) return errors diff --git a/raillabel_providerkit/validation/validate_onthology/_onthology_classes/_attributes/_vector_attribute.py b/raillabel_providerkit/validation/validate_onthology/_onthology_classes/_attributes/_vector_attribute.py index 3d9deaa..295d4b5 100644 --- a/raillabel_providerkit/validation/validate_onthology/_onthology_classes/_attributes/_vector_attribute.py +++ b/raillabel_providerkit/validation/validate_onthology/_onthology_classes/_attributes/_vector_attribute.py @@ -1,7 +1,8 @@ # Copyright DB Netz AG and contributors # SPDX-License-Identifier: Apache-2.0 -import typing as t +from __future__ import annotations + from dataclasses import dataclass from ._attribute_abc import _Attribute @@ -10,20 +11,22 @@ @dataclass class _VectorAttribute(_Attribute): @classmethod - def supports(cls, data_dict: dict): + def supports(cls, data_dict: dict) -> bool: return data_dict == "vector" @classmethod - def fromdict(cls, data_dict: dict): + def fromdict(cls, _: dict) -> _VectorAttribute: return _VectorAttribute() - def check(self, attribute_name: str, attribute_value, annotation_id: str) -> t.List[str]: + def check( + self, attribute_name: str, attribute_value: list | int | str | bool, annotation_id: str + ) -> list[str]: errors = [] - if type(attribute_value) != list: + if type(attribute_value) is not list: errors.append( f"Attribute '{attribute_name}' of annotation {annotation_id} is of type " - + f"'{attribute_value.__class__.__name__}' (should be 'list')." + f"'{attribute_value.__class__.__name__}' (should be 'list')." 
) return errors diff --git a/raillabel_providerkit/validation/validate_onthology/_onthology_classes/_object_classes.py b/raillabel_providerkit/validation/validate_onthology/_onthology_classes/_object_classes.py index b691f82..db7969a 100644 --- a/raillabel_providerkit/validation/validate_onthology/_onthology_classes/_object_classes.py +++ b/raillabel_providerkit/validation/validate_onthology/_onthology_classes/_object_classes.py @@ -1,7 +1,8 @@ # Copyright DB Netz AG and contributors # SPDX-License-Identifier: Apache-2.0 -import typing as t +from __future__ import annotations + from dataclasses import dataclass import raillabel @@ -12,12 +13,11 @@ @dataclass class _ObjectClass: - attributes: t.Dict[str, t.Type[_Attribute]] - sensor_types: t.Dict[raillabel.format.SensorType, _SensorType] + attributes: dict[str, type[_Attribute]] + sensor_types: dict[raillabel.format.SensorType, _SensorType] @classmethod - def fromdict(cls, data_dict: dict) -> "_ObjectClass": - + def fromdict(cls, data_dict: dict) -> _ObjectClass: if "attributes" not in data_dict: data_dict["attributes"] = {} @@ -32,7 +32,7 @@ def fromdict(cls, data_dict: dict) -> "_ObjectClass": sensor_types=cls._sensor_types_fromdict(data_dict["sensor_types"]), ) - def check(self, annotation: t.Type[raillabel.format._ObjectAnnotation]) -> t.List[str]: + def check(self, annotation: type[raillabel.format._ObjectAnnotation]) -> list[str]: errors = [] errors.extend(self._check_undefined_attributes(annotation)) @@ -42,52 +42,41 @@ def check(self, annotation: t.Type[raillabel.format._ObjectAnnotation]) -> t.Lis return errors @classmethod - def _attribute_fromdict(cls, attribute: dict or str) -> t.Type[_Attribute]: - + def _attribute_fromdict(cls, attribute: dict | str) -> type[_Attribute]: for attribute_class in attribute_classes(): if attribute_class.supports(attribute): return attribute_class.fromdict(attribute) - raise ValueError() + raise ValueError @classmethod - def _sensor_types_fromdict(cls, sensor_types_dict: dict) -> t.Dict[str, _SensorType]: - sensor_types = {} - - for type_id, sensor_type_dict in sensor_types_dict.items(): - sensor_types[raillabel.format.SensorType(type_id)] = _SensorType.fromdict( - sensor_type_dict - ) - - return sensor_types + def _sensor_types_fromdict(cls, sensor_types_dict: dict) -> dict[str, _SensorType]: + return { + raillabel.format.SensorType(type_id): _SensorType.fromdict(sensor_type_dict) + for type_id, sensor_type_dict in sensor_types_dict.items() + } def _check_undefined_attributes( - self, annotation: t.Type[raillabel.format._ObjectAnnotation] - ) -> t.List[str]: - errors = [] - - applicable_attributes = self._compile_applicable_attributes(annotation) - - for attr_name in annotation.attributes.keys(): - if attr_name not in applicable_attributes: - errors.append(f"Undefined attribute '{attr_name}' in annotation {annotation.uid}.") - - return errors + self, annotation: type[raillabel.format._ObjectAnnotation] + ) -> list[str]: + return [ + f"Undefined attribute '{attr_name}' in annotation {annotation.uid}."
+ for attr_name in annotation.attributes + if attr_name not in self._compile_applicable_attributes(annotation) + ] def _check_missing_attributes( - self, annotation: t.Type[raillabel.format._ObjectAnnotation] - ) -> t.List[str]: - errors = [] - - for attr_name in self._compile_applicable_attributes(annotation): - if attr_name not in annotation.attributes: - errors.append(f"Missing attribute '{attr_name}' in annotation {annotation.uid}.") - - return errors + self, annotation: type[raillabel.format._ObjectAnnotation] + ) -> list[str]: + return [ + f"Missing attribute '{attr_name}' in annotation {annotation.uid}." + for attr_name in self._compile_applicable_attributes(annotation) + if attr_name not in annotation.attributes + ] def _check_false_attribute_type( - self, annotation: t.Type[raillabel.format._ObjectAnnotation] - ) -> t.List[str]: + self, annotation: type[raillabel.format._ObjectAnnotation] + ) -> list[str]: errors = [] applicable_attributes = self._compile_applicable_attributes(annotation) @@ -102,9 +91,8 @@ def _check_false_attribute_type( return errors def _compile_applicable_attributes( - self, annotation: t.Type[raillabel.format._ObjectAnnotation] - ) -> t.Dict[str, t.Type[_Attribute]]: - + self, annotation: type[raillabel.format._ObjectAnnotation] + ) -> dict[str, type[_Attribute]]: applicable_attributes = self.attributes if annotation.sensor.type in self.sensor_types: diff --git a/raillabel_providerkit/validation/validate_onthology/_onthology_classes/_onthology.py b/raillabel_providerkit/validation/validate_onthology/_onthology_classes/_onthology.py index 456f0b2..73201d3 100644 --- a/raillabel_providerkit/validation/validate_onthology/_onthology_classes/_onthology.py +++ b/raillabel_providerkit/validation/validate_onthology/_onthology_classes/_onthology.py @@ -1,6 +1,8 @@ # Copyright DB Netz AG and contributors # SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + import typing as t from dataclasses import dataclass @@ -11,16 +13,16 @@ @dataclass class _Onthology: - classes: t.Dict[str, _ObjectClass] - errors = [] + classes: dict[str, _ObjectClass] + errors: t.ClassVar = [] @classmethod - def fromdict(cls, data_dict: dict) -> "_Onthology": + def fromdict(cls, data_dict: dict) -> _Onthology: return _Onthology( {class_id: _ObjectClass.fromdict(class_) for class_id, class_ in data_dict.items()} ) - def check(self, scene: raillabel.Scene) -> t.List[str]: + def check(self, scene: raillabel.Scene) -> list[str]: self.errors = [] self._check_class_validity(scene) @@ -30,7 +32,7 @@ def check(self, scene: raillabel.Scene) -> t.List[str]: return self.errors - def _check_class_validity(self, scene: raillabel.Scene) -> t.List[str]: + def _check_class_validity(self, scene: raillabel.Scene) -> list[str]: object_classes_in_scene = [obj.type for obj in scene.objects.values()] for object_class in object_classes_in_scene: @@ -39,7 +41,7 @@ def _check_class_validity(self, scene: raillabel.Scene) -> t.List[str]: def _compile_annotations( self, scene: raillabel.Scene - ) -> t.List[t.Type[raillabel.format._ObjectAnnotation]]: + ) -> list[type[raillabel.format._ObjectAnnotation]]: annotations = [] for frame in scene.frames.values(): annotations.extend(list(frame.annotations.values())) diff --git a/raillabel_providerkit/validation/validate_onthology/_onthology_classes/_sensor_type.py b/raillabel_providerkit/validation/validate_onthology/_onthology_classes/_sensor_type.py index a74960b..1c29cae 100644 --- 
a/raillabel_providerkit/validation/validate_onthology/_onthology_classes/_sensor_type.py +++ b/raillabel_providerkit/validation/validate_onthology/_onthology_classes/_sensor_type.py @@ -1,7 +1,8 @@ # Copyright DB Netz AG and contributors # SPDX-License-Identifier: Apache-2.0 -import typing as t +from __future__ import annotations + from dataclasses import dataclass from ._attributes._attribute_abc import _Attribute, attribute_classes @@ -9,11 +10,10 @@ @dataclass class _SensorType: - attributes: t.Dict[str, t.Type[_Attribute]] + attributes: dict[str, type[_Attribute]] @classmethod - def fromdict(cls, data_dict: dict) -> "_SensorType": - + def fromdict(cls, data_dict: dict) -> _SensorType: if "attributes" not in data_dict: data_dict["attributes"] = {} @@ -25,10 +25,9 @@ def fromdict(cls, data_dict: dict) -> "_SensorType": ) @classmethod - def _attribute_fromdict(cls, attribute: dict or str) -> t.Type[_Attribute]: - + def _attribute_fromdict(cls, attribute: dict | str) -> type[_Attribute]: for attribute_class in attribute_classes(): if attribute_class.supports(attribute): return attribute_class.fromdict(attribute) - raise ValueError() + raise ValueError diff --git a/raillabel_providerkit/validation/validate_onthology/validate_onthology.py b/raillabel_providerkit/validation/validate_onthology/validate_onthology.py index 95bb40a..aab48ba 100644 --- a/raillabel_providerkit/validation/validate_onthology/validate_onthology.py +++ b/raillabel_providerkit/validation/validate_onthology/validate_onthology.py @@ -1,18 +1,20 @@ # Copyright DB Netz AG and contributors # SPDX-License-Identifier: Apache-2.0 -import typing as t +from __future__ import annotations + from pathlib import Path import jsonschema import raillabel import yaml -from ...exceptions import OnthologySchemaError +from raillabel_providerkit.exceptions import OnthologySchemaError + from ._onthology_classes._onthology import _Onthology -def validate_onthology(scene: raillabel.Scene, onthology: t.Union[dict, Path]) -> t.List[str]: +def validate_onthology(scene: raillabel.Scene, onthology: dict | Path) -> list[str]: """Validate a scene based on the classes and attributes. Parameters @@ -29,8 +31,8 @@ def validate_onthology(scene: raillabel.Scene, onthology: t.Union[dict, Path]) - list[str] list of all onthology errors in the scene. If an empty list is returned, then there are no errors present. 
- """ + """ if isinstance(onthology, Path): onthology = _load_onthology(Path(onthology)) @@ -43,14 +45,13 @@ def validate_onthology(scene: raillabel.Scene, onthology: t.Union[dict, Path]) - def _load_onthology(path: Path) -> dict: with path.open() as f: - onthology = yaml.safe_load(f) - return onthology + return yaml.safe_load(f) -def _validate_onthology_schema(onthology: dict): - SCHEMA_PATH = Path(__file__).parent / "onthology_schema_v1.yaml" +def _validate_onthology_schema(onthology: dict) -> None: + schema_path = Path(__file__).parent / "onthology_schema_v1.yaml" - with SCHEMA_PATH.open() as f: + with schema_path.open() as f: onthology_schema = yaml.safe_load(f) validator = jsonschema.Draft7Validator(schema=onthology_schema) diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..aabc64e --- /dev/null +++ b/tests/__init__.py @@ -0,0 +1,2 @@ +# Copyright DB Netz AG and contributors +# SPDX-License-Identifier: Apache-2.0 diff --git a/tests/conftest.py b/tests/conftest.py index fba5e02..cad0f89 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -11,9 +11,10 @@ json_data_directories = [ Path(__file__).parent / "__test_assets__", - Path(__file__).parent.parent / "raillabel_providerkit" / "format" + Path(__file__).parent.parent / "raillabel_providerkit" / "format", ] + @pytest.fixture def json_paths(request) -> t.Dict[str, Path]: out = _fetch_json_paths_from_cache(request) @@ -23,9 +24,11 @@ def json_paths(request) -> t.Dict[str, Path]: return out + def _fetch_json_paths_from_cache(request) -> t.Optional[t.Dict[str, Path]]: return request.config.cache.get("json_paths", None) + def _collect_json_paths() -> t.List[Path]: out = [] @@ -34,6 +37,7 @@ def _collect_json_paths() -> t.List[Path]: return out + def _get_file_identifier(path: Path) -> str: """Return relative path from test asset dir as string.""" @@ -43,13 +47,14 @@ def _get_file_identifier(path: Path) -> str: test_assets_dir_index = path.parts.index("__test_assets__") relative_path = "" - for part in path.parts[test_assets_dir_index+1:-1]: + for part in path.parts[test_assets_dir_index + 1 : -1]: relative_path += part + "/" relative_path += path.stem return relative_path + @pytest.fixture def json_data(request) -> t.Dict[str, dict]: out = _fetch_json_data_from_cache(request) @@ -59,14 +64,17 @@ def json_data(request) -> t.Dict[str, dict]: return out + def _fetch_json_data_from_cache(request) -> t.Optional[t.Dict[str, Path]]: return request.config.cache.get("json_data", None) + def _load_json_data(path: Path) -> dict: with path.open() as f: out = json.load(f) return out + @pytest.fixture def empty_scene() -> raillabel.Scene: return raillabel.Scene( @@ -76,6 +84,7 @@ def empty_scene() -> raillabel.Scene: frames={}, ) + @pytest.fixture def default_frame(empty_annotation) -> raillabel.format.Frame: return raillabel.format.Frame( @@ -83,20 +92,14 @@ def default_frame(empty_annotation) -> raillabel.format.Frame: timestamp=None, sensors={}, frame_data={}, - annotations={ - "0fb4fc0b-3eeb-443a-8dd0-2caf9912d016": empty_annotation - } + annotations={"0fb4fc0b-3eeb-443a-8dd0-2caf9912d016": empty_annotation}, ) + @pytest.fixture def empty_frame() -> raillabel.format.Frame: - return raillabel.format.Frame( - uid=0, - timestamp=None, - sensors={}, - frame_data={}, - annotations={} - ) + return raillabel.format.Frame(uid=0, timestamp=None, sensors={}, frame_data={}, annotations={}) + @pytest.fixture def empty_annotation() -> raillabel.format.Bbox: diff --git 
a/tests/test_raillabel_providerkit/_util/test_warning.py b/tests/test_raillabel_providerkit/_util/test_warning.py index 5d98cd6..2bd1212 100644 --- a/tests/test_raillabel_providerkit/_util/test_warning.py +++ b/tests/test_raillabel_providerkit/_util/test_warning.py @@ -9,15 +9,15 @@ def test_issue_warning(): with _WarningsLogger() as logger: _warning("lorem ipsum") - assert logger.warnings == [ - "lorem ipsum" - ] + assert logger.warnings == ["lorem ipsum"] + def test_handover_exception(): with pytest.raises(RuntimeError): with _WarningsLogger(): raise RuntimeError("weewoo something went wrong") + def test_clear_warnings(): with _WarningsLogger(): _warning("lorem ipsum") diff --git a/tests/test_raillabel_providerkit/convert/loader_classes/test_loader_understand_ai.py b/tests/test_raillabel_providerkit/convert/loader_classes/test_loader_understand_ai.py index 41a80bb..9163d94 100644 --- a/tests/test_raillabel_providerkit/convert/loader_classes/test_loader_understand_ai.py +++ b/tests/test_raillabel_providerkit/convert/loader_classes/test_loader_understand_ai.py @@ -6,6 +6,7 @@ from raillabel_providerkit.convert.loader_classes.loader_understand_ai import LoaderUnderstandAi + def test_supports__true(json_data): assert LoaderUnderstandAi().supports(json_data["understand_ai_real_life"]) @@ -34,17 +35,19 @@ def test_load(json_data): input_data_raillabel = remove_non_parsed_fields(json_data["openlabel_v1_short"]) input_data_uai = json_data["understand_ai_t4_short"] - scene_ground_truth = raillabel.load_.loader_classes.loader_raillabel.LoaderRailLabel().load(input_data_raillabel, validate=False) + scene_ground_truth = raillabel.load_.loader_classes.loader_raillabel.LoaderRailLabel().load( + input_data_raillabel, validate=False + ) scene = LoaderUnderstandAi().load(input_data_uai, validate_schema=False) scene.metadata = scene_ground_truth.metadata assert scene == scene_ground_truth + def remove_non_parsed_fields(raillabel_data: dict) -> dict: """Return RailLabel file with frame_data and poly3ds removed.""" for frame in raillabel_data["openlabel"]["frames"].values(): - if "frame_data" in frame["frame_properties"]: del frame["frame_properties"]["frame_data"] diff --git a/tests/test_raillabel_providerkit/convert/test_convert.py b/tests/test_raillabel_providerkit/convert/test_convert.py index 79e8709..dd997ad 100644 --- a/tests/test_raillabel_providerkit/convert/test_convert.py +++ b/tests/test_raillabel_providerkit/convert/test_convert.py @@ -14,10 +14,11 @@ def test_convert_uai_select_class(json_data): scene = raillabel_providerkit.convert(data=json_data["understand_ai_t4_short"]) assert len(scene.frames) != 0 + def test_convert_uai_provide_class(json_data): scene = raillabel_providerkit.convert( data=json_data["understand_ai_t4_short"], - loader_class=raillabel_providerkit.loader_classes.LoaderUnderstandAi + loader_class=raillabel_providerkit.loader_classes.LoaderUnderstandAi, ) assert len(scene.frames) != 0 diff --git a/tests/test_raillabel_providerkit/format/test_raillabel_v2_schema.py b/tests/test_raillabel_providerkit/format/test_raillabel_v2_schema.py index ad8102d..e209edf 100644 --- a/tests/test_raillabel_providerkit/format/test_raillabel_v2_schema.py +++ b/tests/test_raillabel_providerkit/format/test_raillabel_v2_schema.py @@ -8,24 +8,20 @@ def test_metaschema_validation(json_data): - assert jsonschema.validate( - json_data["raillabel_schema"], - json_data["metaschema"] - ) is None + assert jsonschema.validate(json_data["raillabel_schema"], json_data["metaschema"]) is None def 
test_sample_data_validation_subschema(json_data): - assert jsonschema.validate( - json_data["openlabel_v1_short"], - json_data["raillabel_schema"] - ) is None + assert ( + jsonschema.validate(json_data["openlabel_v1_short"], json_data["raillabel_schema"]) is None + ) def test_sample_data_validation_superschema(json_data): - assert jsonschema.validate( - json_data["openlabel_v1_short"], - json_data["openlabel_v1_schema"] - ) is None + assert ( + jsonschema.validate(json_data["openlabel_v1_short"], json_data["openlabel_v1_schema"]) + is None + ) # Executes the test if the file is called diff --git a/tests/test_raillabel_providerkit/format/understand_ai/__init__.py b/tests/test_raillabel_providerkit/format/understand_ai/__init__.py new file mode 100644 index 0000000..aabc64e --- /dev/null +++ b/tests/test_raillabel_providerkit/format/understand_ai/__init__.py @@ -0,0 +1,2 @@ +# Copyright DB Netz AG and contributors +# SPDX-License-Identifier: Apache-2.0 diff --git a/tests/test_raillabel_providerkit/format/understand_ai/conftest.py b/tests/test_raillabel_providerkit/format/understand_ai/conftest.py index 38a72d7..7f14471 100644 --- a/tests/test_raillabel_providerkit/format/understand_ai/conftest.py +++ b/tests/test_raillabel_providerkit/format/understand_ai/conftest.py @@ -1,18 +1,18 @@ # Copyright DB Netz AG and contributors # SPDX-License-Identifier: Apache-2.0 -from test_uai_attributes import attributes_raillabel_dict, attributes_uai, attributes_uai_dict -from test_uai_bounding_box_2d import ( +from .test_uai_attributes import attributes_raillabel_dict, attributes_uai, attributes_uai_dict +from .test_uai_bounding_box_2d import ( bounding_box_2d_raillabel_dict, bounding_box_2d_uai, bounding_box_2d_uai_dict, ) -from test_uai_bounding_box_3d import ( +from .test_uai_bounding_box_3d import ( bounding_box_3d_raillabel_dict, bounding_box_3d_uai, bounding_box_3d_uai_dict, ) -from test_uai_coordinate_system import ( +from .test_uai_coordinate_system import ( coordinate_system_camera_raillabel_dict, coordinate_system_camera_translated_uid, coordinate_system_camera_uai, @@ -26,19 +26,19 @@ coordinate_system_radar_uai, coordinate_system_radar_uai_dict, ) -from test_uai_frame import frame_raillabel_dict, frame_uai, frame_uai_dict -from test_uai_metadata import metadata_raillabel_dict, metadata_uai, metadata_uai_dict -from test_uai_point_3d import point_3d_uai, point_3d_uai_dict, point_3d_vec -from test_uai_polygon_2d import polygon_2d_raillabel_dict, polygon_2d_uai, polygon_2d_uai_dict -from test_uai_polyline_2d import polyline_2d_raillabel_dict, polyline_2d_uai, polyline_2d_uai_dict -from test_uai_quaternion import quaternion_uai, quaternion_uai_dict, quaternion_vec -from test_uai_scene import scene_raillabel_dict, scene_uai, scene_uai_dict -from test_uai_segmentation_3d import ( +from .test_uai_frame import frame_raillabel_dict, frame_uai, frame_uai_dict +from .test_uai_metadata import metadata_raillabel_dict, metadata_uai, metadata_uai_dict +from .test_uai_point_3d import point_3d_uai, point_3d_uai_dict, point_3d_vec +from .test_uai_polygon_2d import polygon_2d_raillabel_dict, polygon_2d_uai, polygon_2d_uai_dict +from .test_uai_polyline_2d import polyline_2d_raillabel_dict, polyline_2d_uai, polyline_2d_uai_dict +from .test_uai_quaternion import quaternion_uai, quaternion_uai_dict, quaternion_vec +from .test_uai_scene import scene_raillabel_dict, scene_uai, scene_uai_dict +from .test_uai_segmentation_3d import ( segmentation_3d_raillabel_dict, segmentation_3d_uai, segmentation_3d_uai_dict, ) -from 
test_uai_sensor_reference import ( +from .test_uai_sensor_reference import ( sensor_camera_raillabel_dict, sensor_camera_uai, sensor_camera_uai_dict, @@ -46,4 +46,4 @@ sensor_lidar_uai, sensor_lidar_uai_dict, ) -from test_uai_size_3d import size_3d_uai, size_3d_uai_dict, size_3d_vec +from .test_uai_size_3d import size_3d_uai, size_3d_uai_dict, size_3d_vec diff --git a/tests/test_raillabel_providerkit/format/understand_ai/test_uai_attributes.py b/tests/test_raillabel_providerkit/format/understand_ai/test_uai_attributes.py index d234cf0..c14d9a6 100644 --- a/tests/test_raillabel_providerkit/format/understand_ai/test_uai_attributes.py +++ b/tests/test_raillabel_providerkit/format/understand_ai/test_uai_attributes.py @@ -5,47 +5,21 @@ # == Fixtures ========================= + @pytest.fixture def attributes_uai_dict() -> dict: - return { - "isDummy": False, - "carries": "nothing", - "connectedTo": [], - "pose": "upright" - } + return {"isDummy": False, "carries": "nothing", "connectedTo": [], "pose": "upright"} + @pytest.fixture def attributes_uai() -> dict: - return { - "isDummy": False, - "carries": "nothing", - "connectedTo": [], - "pose": "upright" - } + return {"isDummy": False, "carries": "nothing", "connectedTo": [], "pose": "upright"} + @pytest.fixture def attributes_raillabel_dict() -> dict: return { - "text": [ - { - "name": "carries", - "val": "nothing" - }, - { - "name": "pose", - "val": "upright" - } - ], - "boolean": [ - { - "name": "isDummy", - "val": False - } - ], - "vec": [ - { - "name": "connectedTo", - "val": [] - } - ] + "text": [{"name": "carries", "val": "nothing"}, {"name": "pose", "val": "upright"}], + "boolean": [{"name": "isDummy", "val": False}], + "vec": [{"name": "connectedTo", "val": []}], } diff --git a/tests/test_raillabel_providerkit/format/understand_ai/test_uai_bounding_box_2d.py b/tests/test_raillabel_providerkit/format/understand_ai/test_uai_bounding_box_2d.py index 16d97e4..b7d1a11 100644 --- a/tests/test_raillabel_providerkit/format/understand_ai/test_uai_bounding_box_2d.py +++ b/tests/test_raillabel_providerkit/format/understand_ai/test_uai_bounding_box_2d.py @@ -9,22 +9,19 @@ # == Fixtures ========================= + @pytest.fixture def bounding_box_2d_uai_dict(sensor_camera_uai_dict, attributes_uai_dict) -> dict: return { "id": "2f2a1d7f-56d1-435c-a3ec-d6b8fdaaa965", "objectId": "48c988bd-76f1-423f-b46d-7e7acb859f31", "className": "test_class", - "geometry": { - "xMin": 1, - "yMin": 2, - "xMax": 3, - "yMax": 4 - }, + "geometry": {"xMin": 1, "yMin": 2, "xMax": 3, "yMax": 4}, "attributes": attributes_uai_dict, - "sensor": sensor_camera_uai_dict + "sensor": sensor_camera_uai_dict, } + @pytest.fixture def bounding_box_2d_uai(attributes_uai, sensor_camera_uai) -> dict: return uai_format.BoundingBox2d( @@ -39,16 +36,14 @@ def bounding_box_2d_uai(attributes_uai, sensor_camera_uai) -> dict: sensor=sensor_camera_uai, ) + @pytest.fixture -def bounding_box_2d_raillabel_dict(attributes_raillabel_dict, coordinate_system_camera_translated_uid) -> dict: +def bounding_box_2d_raillabel_dict( + attributes_raillabel_dict, coordinate_system_camera_translated_uid +) -> dict: return { "name": "2f2a1d7f-56d1-435c-a3ec-d6b8fdaaa965", - "val": [ - 2.0, - 3.0, - 2.0, - 2.0 - ], + "val": [2.0, 3.0, 2.0, 2.0], "coordinate_system": coordinate_system_camera_translated_uid, "attributes": attributes_raillabel_dict, } @@ -56,23 +51,16 @@ def bounding_box_2d_raillabel_dict(attributes_raillabel_dict, coordinate_system_ # == Tests ============================ -def test_fromdict( - 
attributes_uai_dict, attributes_uai, - sensor_camera_uai_dict, sensor_camera_uai -): + +def test_fromdict(attributes_uai_dict, attributes_uai, sensor_camera_uai_dict, sensor_camera_uai): bounding_box_2d = uai_format.BoundingBox2d.fromdict( { "id": "2f2a1d7f-56d1-435c-a3ec-d6b8fdaaa965", "objectId": "48c988bd-76f1-423f-b46d-7e7acb859f31", "className": "test_class", - "geometry": { - "xMin": 1, - "yMin": 2, - "xMax": 3, - "yMax": 4 - }, + "geometry": {"xMin": 1, "yMin": 2, "xMax": 3, "yMax": 4}, "attributes": attributes_uai_dict, - "sensor": sensor_camera_uai_dict + "sensor": sensor_camera_uai_dict, } ) @@ -88,8 +76,11 @@ def test_fromdict( def test_to_raillabel( - attributes_uai, attributes_raillabel_dict, - sensor_camera_uai, sensor_camera_raillabel_dict, coordinate_system_camera_translated_uid, + attributes_uai, + attributes_raillabel_dict, + sensor_camera_uai, + sensor_camera_raillabel_dict, + coordinate_system_camera_translated_uid, ): bounding_box_2d = uai_format.BoundingBox2d( id=UUID("2f2a1d7f-56d1-435c-a3ec-d6b8fdaaa965"), @@ -107,12 +98,7 @@ def test_to_raillabel( assert data_dict == { "name": "2f2a1d7f-56d1-435c-a3ec-d6b8fdaaa965", - "val": [ - 2.0, - 3.0, - 2.0, - 2.0 - ], + "val": [2.0, 3.0, 2.0, 2.0], "coordinate_system": coordinate_system_camera_translated_uid, "attributes": attributes_raillabel_dict, } @@ -120,7 +106,9 @@ def test_to_raillabel( assert translated_class_id == translate_class_id(bounding_box_2d.class_name) assert sensor_reference == sensor_camera_raillabel_dict + if __name__ == "__main__": import os + os.system("clear") pytest.main([__file__, "--disable-pytest-warnings", "--cache-clear", "-v"]) diff --git a/tests/test_raillabel_providerkit/format/understand_ai/test_uai_bounding_box_3d.py b/tests/test_raillabel_providerkit/format/understand_ai/test_uai_bounding_box_3d.py index 5833e8e..cdf4264 100644 --- a/tests/test_raillabel_providerkit/format/understand_ai/test_uai_bounding_box_3d.py +++ b/tests/test_raillabel_providerkit/format/understand_ai/test_uai_bounding_box_3d.py @@ -9,11 +9,14 @@ # == Fixtures ========================= + @pytest.fixture def bounding_box_3d_uai_dict( - point_3d_uai_dict, size_3d_uai_dict, quaternion_uai_dict, + point_3d_uai_dict, + size_3d_uai_dict, + quaternion_uai_dict, sensor_lidar_uai_dict, - attributes_uai_dict + attributes_uai_dict, ) -> dict: return { "id": "910399ec-da3e-4d7e-be42-ef8e53e38ca6", @@ -22,7 +25,7 @@ def bounding_box_3d_uai_dict( "geometry": { "size": size_3d_uai_dict, "center": point_3d_uai_dict, - "quaternion": quaternion_uai_dict + "quaternion": quaternion_uai_dict, }, "attributes": attributes_uai_dict, "sensor": sensor_lidar_uai_dict, @@ -30,11 +33,7 @@ def bounding_box_3d_uai_dict( @pytest.fixture -def bounding_box_3d_uai( - point_3d_uai, size_3d_uai, quaternion_uai, - attributes_uai, - sensor_lidar_uai -): +def bounding_box_3d_uai(point_3d_uai, size_3d_uai, quaternion_uai, attributes_uai, sensor_lidar_uai): return uai_format.BoundingBox3d( id=UUID("910399ec-da3e-4d7e-be42-ef8e53e38ca6"), object_id=UUID("48c988bd-76f1-423f-b46d-7e7acb859f31"), @@ -46,26 +45,37 @@ def bounding_box_3d_uai( sensor=sensor_lidar_uai, ) + @pytest.fixture def bounding_box_3d_raillabel_dict( - point_3d_vec, size_3d_vec, quaternion_vec, - coordinate_system_lidar_translated_uid, attributes_raillabel_dict + point_3d_vec, + size_3d_vec, + quaternion_vec, + coordinate_system_lidar_translated_uid, + attributes_raillabel_dict, ) -> dict: return { "name": "910399ec-da3e-4d7e-be42-ef8e53e38ca6", "val": point_3d_vec + quaternion_vec + 
size_3d_vec, "coordinate_system": coordinate_system_lidar_translated_uid, - "attributes": attributes_raillabel_dict + "attributes": attributes_raillabel_dict, } # == Tests ============================ + def test_fromdict( - size_3d_uai_dict, point_3d_uai_dict, quaternion_uai_dict, - size_3d_uai, point_3d_uai, quaternion_uai, - sensor_lidar_uai_dict, sensor_lidar_uai, - attributes_uai_dict, attributes_uai, + size_3d_uai_dict, + point_3d_uai_dict, + quaternion_uai_dict, + size_3d_uai, + point_3d_uai, + quaternion_uai, + sensor_lidar_uai_dict, + sensor_lidar_uai, + attributes_uai_dict, + attributes_uai, ): bounding_box_3d = uai_format.BoundingBox3d.fromdict( { @@ -75,7 +85,7 @@ def test_fromdict( "geometry": { "size": size_3d_uai_dict, "center": point_3d_uai_dict, - "quaternion": quaternion_uai_dict + "quaternion": quaternion_uai_dict, }, "attributes": attributes_uai_dict, "sensor": sensor_lidar_uai_dict, @@ -93,10 +103,17 @@ def test_fromdict( def test_to_raillabel( - size_3d_uai, point_3d_uai, quaternion_uai, - point_3d_vec, quaternion_vec, size_3d_vec, - attributes_uai, attributes_raillabel_dict, - sensor_lidar_uai, coordinate_system_lidar_translated_uid, sensor_lidar_raillabel_dict, + size_3d_uai, + point_3d_uai, + quaternion_uai, + point_3d_vec, + quaternion_vec, + size_3d_vec, + attributes_uai, + attributes_raillabel_dict, + sensor_lidar_uai, + coordinate_system_lidar_translated_uid, + sensor_lidar_raillabel_dict, ): bounding_box_3d = uai_format.BoundingBox3d( id=UUID("910399ec-da3e-4d7e-be42-ef8e53e38ca6"), @@ -115,13 +132,15 @@ def test_to_raillabel( "name": "910399ec-da3e-4d7e-be42-ef8e53e38ca6", "val": point_3d_vec + quaternion_vec + size_3d_vec, "coordinate_system": coordinate_system_lidar_translated_uid, - "attributes": attributes_raillabel_dict + "attributes": attributes_raillabel_dict, } assert object_id == str(bounding_box_3d.object_id) assert translated_class_id == translate_class_id(bounding_box_3d.class_name) assert sensor_reference == sensor_lidar_raillabel_dict + if __name__ == "__main__": import os + os.system("clear") pytest.main([__file__, "--disable-pytest-warnings", "--cache-clear", "-v"]) diff --git a/tests/test_raillabel_providerkit/format/understand_ai/test_uai_coordinate_system.py b/tests/test_raillabel_providerkit/format/understand_ai/test_uai_coordinate_system.py index 10afa7d..fc64dff 100644 --- a/tests/test_raillabel_providerkit/format/understand_ai/test_uai_coordinate_system.py +++ b/tests/test_raillabel_providerkit/format/understand_ai/test_uai_coordinate_system.py @@ -6,6 +6,7 @@ # == Fixtures ========================= + @pytest.fixture def coordinate_system_camera_uai_dict(point_3d_vec, quaternion_vec) -> dict: return { @@ -19,13 +20,20 @@ def coordinate_system_camera_uai_dict(point_3d_vec, quaternion_vec) -> dict: "homogeneous_transform": [0] * 16, "measured_position": [0, 0, 0], "camera_matrix": [ - 3535, 0, 319.5, - 0, 3535, 239.5, - 0, 0, 1 , + 3535, + 0, + 319.5, + 0, + 3535, + 239.5, + 0, + 0, + 1, ], - "dist_coeffs": [0, 1, 2, 3, 4] + "dist_coeffs": [0, 1, 2, 3, 4], } + @pytest.fixture def coordinate_system_camera_uai(point_3d_vec, quaternion_vec): return uai_format.CoordinateSystem( @@ -39,23 +47,27 @@ def coordinate_system_camera_uai(point_3d_vec, quaternion_vec): homogeneous_transform=[0] * 16, measured_position=[0, 0, 0], camera_matrix=[ - 3535, 0, 319.5, - 0, 3535, 239.5, - 0, 0, 1 , + 3535, + 0, + 319.5, + 0, + 3535, + 239.5, + 0, + 0, + 1, ], - dist_coeffs=[0, 1, 2, 3, 4] + dist_coeffs=[0, 1, 2, 3, 4], ) + @pytest.fixture def 
coordinate_system_camera_raillabel_dict(point_3d_vec, quaternion_vec) -> dict: return ( { "type": "sensor", "parent": "base", - "pose_wrt_parent": { - "translation": point_3d_vec, - "quaternion": quaternion_vec - } + "pose_wrt_parent": {"translation": point_3d_vec, "quaternion": quaternion_vec}, }, { "type": "camera", @@ -63,18 +75,28 @@ def coordinate_system_camera_raillabel_dict(point_3d_vec, quaternion_vec) -> dic "stream_properties": { "intrinsics_pinhole": { "camera_matrix": [ - 3535, 0, 319.5, 0, - 0, 3535, 239.5, 0, - 0, 0, 1 , 0, + 3535, + 0, + 319.5, + 0, + 0, + 3535, + 239.5, + 0, + 0, + 0, + 1, + 0, ], "distortion_coeffs": [0, 1, 2, 3, 4], "width_px": 640, "height_px": 480, } - } - } + }, + }, ) + @pytest.fixture def coordinate_system_camera_translated_uid() -> dict: return "ir_middle" @@ -93,6 +115,7 @@ def coordinate_system_lidar_uai_dict(point_3d_vec, quaternion_vec) -> dict: "homogeneous_transform": [0] * 16, } + @pytest.fixture def coordinate_system_lidar_uai(point_3d_vec, quaternion_vec): return uai_format.CoordinateSystem( @@ -106,23 +129,22 @@ def coordinate_system_lidar_uai(point_3d_vec, quaternion_vec): homogeneous_transform=[0] * 16, ) + @pytest.fixture def coordinate_system_lidar_raillabel_dict(point_3d_vec, quaternion_vec) -> dict: return ( { "type": "sensor", "parent": "base", - "pose_wrt_parent": { - "translation": point_3d_vec, - "quaternion": quaternion_vec - } + "pose_wrt_parent": {"translation": point_3d_vec, "quaternion": quaternion_vec}, }, { "type": "lidar", "uri": "/lidar_merged", - } + }, ) + @pytest.fixture def coordinate_system_lidar_translated_uid() -> dict: return "lidar" @@ -142,6 +164,7 @@ def coordinate_system_radar_uai_dict(point_3d_vec, quaternion_vec) -> dict: "homogeneous_transform": [0] * 16, } + @pytest.fixture def coordinate_system_radar_uai(point_3d_vec, quaternion_vec): return uai_format.CoordinateSystem( @@ -155,16 +178,14 @@ def coordinate_system_radar_uai(point_3d_vec, quaternion_vec): homogeneous_transform=[0] * 16, ) + @pytest.fixture def coordinate_system_radar_raillabel_dict(point_3d_vec, quaternion_vec) -> dict: return ( { "type": "sensor", "parent": "base", - "pose_wrt_parent": { - "translation": point_3d_vec, - "quaternion": quaternion_vec - } + "pose_wrt_parent": {"translation": point_3d_vec, "quaternion": quaternion_vec}, }, { "type": "radar", @@ -173,18 +194,21 @@ def coordinate_system_radar_raillabel_dict(point_3d_vec, quaternion_vec) -> dict "intrinsics_radar": { "resolution_px_per_m": 2.856, "width_px": 2856, - "height_px": 1428 + "height_px": 1428, } - } - } + }, + }, ) + @pytest.fixture def coordinate_system_radar_translated_uid() -> dict: return "radar" + # == Tests ============================ + def test_fromdict(point_3d_vec, quaternion_vec): coordinate_system = uai_format.CoordinateSystem.fromdict( { @@ -198,11 +222,17 @@ def test_fromdict(point_3d_vec, quaternion_vec): "homogeneous_transform": [0] * 16, "measured_position": [0, 0, 0], "camera_matrix": [ - 3535, 0, 319.5, - 0, 3535, 239.5, - 0, 0, 1 , + 3535, + 0, + 319.5, + 0, + 3535, + 239.5, + 0, + 0, + 1, ], - "dist_coeffs": [0, 1, 2, 3, 4] + "dist_coeffs": [0, 1, 2, 3, 4], } ) @@ -216,9 +246,15 @@ def test_fromdict(point_3d_vec, quaternion_vec): assert coordinate_system.homogeneous_transform == [0] * 16 assert coordinate_system.measured_position == [0, 0, 0] assert coordinate_system.camera_matrix == [ - 3535, 0, 319.5, - 0, 3535, 239.5, - 0, 0, 1 , + 3535, + 0, + 319.5, + 0, + 3535, + 239.5, + 0, + 0, + 1, ] assert coordinate_system.dist_coeffs == [0, 1, 2, 3, 4] 
@@ -235,22 +271,26 @@ def test_to_raillabel__coordinate_system(point_3d_vec, quaternion_vec): homogeneous_transform=[0] * 16, measured_position=[0, 0, 0], camera_matrix=[ - 3535, 0, 319.5, - 0, 3535, 239.5, - 0, 0, 1 , + 3535, + 0, + 319.5, + 0, + 3535, + 239.5, + 0, + 0, + 1, ], - dist_coeffs=[0, 1, 2, 3, 4] + dist_coeffs=[0, 1, 2, 3, 4], ) assert coordinate_system.to_raillabel()[0] == { "type": "sensor", "parent": "base", - "pose_wrt_parent": { - "translation": point_3d_vec, - "quaternion": quaternion_vec - } + "pose_wrt_parent": {"translation": point_3d_vec, "quaternion": quaternion_vec}, } + def test_to_raillabel__stream__camera(point_3d_vec, quaternion_vec): coordinate_system = uai_format.CoordinateSystem( uid="ir_middle", @@ -263,11 +303,17 @@ def test_to_raillabel__stream__camera(point_3d_vec, quaternion_vec): homogeneous_transform=[0] * 16, measured_position=[0, 0, 0], camera_matrix=[ - 3535, 0, 319.5, - 0, 3535, 239.5, - 0, 0, 1 , + 3535, + 0, + 319.5, + 0, + 3535, + 239.5, + 0, + 0, + 1, ], - dist_coeffs=[0, 1, 2, 3, 4] + dist_coeffs=[0, 1, 2, 3, 4], ) assert coordinate_system.to_raillabel()[1] == { @@ -276,17 +322,27 @@ def test_to_raillabel__stream__camera(point_3d_vec, quaternion_vec): "stream_properties": { "intrinsics_pinhole": { "camera_matrix": [ - 3535, 0, 319.5, 0, - 0, 3535, 239.5, 0, - 0, 0, 1 , 0, + 3535, + 0, + 319.5, + 0, + 0, + 3535, + 239.5, + 0, + 0, + 0, + 1, + 0, ], "distortion_coeffs": [0, 1, 2, 3, 4], "width_px": 640, "height_px": 480, } - } + }, } + def test_to_raillabel__stream__lidar(point_3d_vec, quaternion_vec): coordinate_system = uai_format.CoordinateSystem( uid="LIDAR", @@ -304,6 +360,7 @@ def test_to_raillabel__stream__lidar(point_3d_vec, quaternion_vec): "uri": "/lidar_merged", } + def test_to_raillabel__stream__radar(point_3d_vec, quaternion_vec): coordinate_system = uai_format.CoordinateSystem( uid="radar", @@ -320,16 +377,13 @@ def test_to_raillabel__stream__radar(point_3d_vec, quaternion_vec): "type": "radar", "uri": "/talker1/Nvt/Cartesian", "stream_properties": { - "intrinsics_radar": { - "resolution_px_per_m": 2.856, - "width_px": 2856, - "height_px": 1428 - } - } + "intrinsics_radar": {"resolution_px_per_m": 2.856, "width_px": 2856, "height_px": 1428} + }, } if __name__ == "__main__": import os + os.system("clear") pytest.main([__file__, "--disable-pytest-warnings", "--cache-clear", "-v"]) diff --git a/tests/test_raillabel_providerkit/format/understand_ai/test_uai_frame.py b/tests/test_raillabel_providerkit/format/understand_ai/test_uai_frame.py index 4caa59c..930c55e 100644 --- a/tests/test_raillabel_providerkit/format/understand_ai/test_uai_frame.py +++ b/tests/test_raillabel_providerkit/format/understand_ai/test_uai_frame.py @@ -7,6 +7,7 @@ # == Fixtures ========================= + @pytest.fixture def frame_uai_dict( bounding_box_2d_uai_dict, @@ -25,9 +26,10 @@ def frame_uai_dict( "2D_POLYGON": [polygon_2d_uai_dict], "3D_BOUNDING_BOX": [bounding_box_3d_uai_dict], "3D_SEGMENTATION": [segmentation_3d_uai_dict], - } + }, } + @pytest.fixture def frame_uai( bounding_box_2d_uai, @@ -47,15 +49,23 @@ def frame_uai( segmentation_3ds={str(segmentation_3d_uai.id): segmentation_3d_uai}, ) + @pytest.fixture def frame_raillabel_dict( - bounding_box_2d_uai, bounding_box_2d_raillabel_dict, + bounding_box_2d_uai, + bounding_box_2d_raillabel_dict, bounding_box_3d_raillabel_dict, - polygon_2d_uai, polygon_2d_raillabel_dict, - polyline_2d_uai, polyline_2d_raillabel_dict, - segmentation_3d_uai, segmentation_3d_raillabel_dict, - sensor_lidar_uai, 
sensor_lidar_raillabel_dict, coordinate_system_lidar_translated_uid, - sensor_camera_raillabel_dict, coordinate_system_camera_translated_uid, + polygon_2d_uai, + polygon_2d_raillabel_dict, + polyline_2d_uai, + polyline_2d_raillabel_dict, + segmentation_3d_uai, + segmentation_3d_raillabel_dict, + sensor_lidar_uai, + sensor_lidar_raillabel_dict, + coordinate_system_lidar_translated_uid, + sensor_camera_raillabel_dict, + coordinate_system_camera_translated_uid, ) -> dict: return { "frame_properties": { @@ -63,7 +73,7 @@ def frame_raillabel_dict( "streams": { coordinate_system_camera_translated_uid: sensor_camera_raillabel_dict, coordinate_system_lidar_translated_uid: sensor_lidar_raillabel_dict, - } + }, }, "objects": { str(bounding_box_2d_uai.object_id): { @@ -72,33 +82,33 @@ def frame_raillabel_dict( "cuboid": [bounding_box_3d_raillabel_dict], } }, - str(polygon_2d_uai.object_id): { - "object_data": { - "poly2d": [polygon_2d_raillabel_dict] - } - }, + str(polygon_2d_uai.object_id): {"object_data": {"poly2d": [polygon_2d_raillabel_dict]}}, str(polyline_2d_uai.object_id): { - "object_data": { - "poly2d": [polyline_2d_raillabel_dict] - } + "object_data": {"poly2d": [polyline_2d_raillabel_dict]} }, str(segmentation_3d_uai.object_id): { - "object_data": { - "vec": [segmentation_3d_raillabel_dict] - } + "object_data": {"vec": [segmentation_3d_raillabel_dict]} }, - } + }, } + # == Tests ============================ + def test_fromdict( - bounding_box_2d_uai_dict, bounding_box_2d_uai, - bounding_box_3d_uai_dict, bounding_box_3d_uai, - polygon_2d_uai_dict, polygon_2d_uai, - polyline_2d_uai_dict, polyline_2d_uai, - segmentation_3d_uai_dict, segmentation_3d_uai, - sensor_lidar_uai_dict, sensor_lidar_uai, + bounding_box_2d_uai_dict, + bounding_box_2d_uai, + bounding_box_3d_uai_dict, + bounding_box_3d_uai, + polygon_2d_uai_dict, + polygon_2d_uai, + polyline_2d_uai_dict, + polyline_2d_uai, + segmentation_3d_uai_dict, + segmentation_3d_uai, + sensor_lidar_uai_dict, + sensor_lidar_uai, ): frame = uai_format.Frame.fromdict( { @@ -110,7 +120,7 @@ def test_fromdict( "2D_POLYGON": [polygon_2d_uai_dict], "3D_BOUNDING_BOX": [bounding_box_3d_uai_dict], "3D_SEGMENTATION": [segmentation_3d_uai_dict], - } + }, } ) @@ -124,13 +134,21 @@ def test_fromdict( def test_to_raillabel( - bounding_box_2d_uai, bounding_box_2d_raillabel_dict, - bounding_box_3d_uai, bounding_box_3d_raillabel_dict, - polygon_2d_uai, polygon_2d_raillabel_dict, - polyline_2d_uai, polyline_2d_raillabel_dict, - segmentation_3d_uai, segmentation_3d_raillabel_dict, - sensor_lidar_uai, sensor_lidar_raillabel_dict, coordinate_system_lidar_translated_uid, - sensor_camera_raillabel_dict, coordinate_system_camera_translated_uid, + bounding_box_2d_uai, + bounding_box_2d_raillabel_dict, + bounding_box_3d_uai, + bounding_box_3d_raillabel_dict, + polygon_2d_uai, + polygon_2d_raillabel_dict, + polyline_2d_uai, + polyline_2d_raillabel_dict, + segmentation_3d_uai, + segmentation_3d_raillabel_dict, + sensor_lidar_uai, + sensor_lidar_raillabel_dict, + coordinate_system_lidar_translated_uid, + sensor_camera_raillabel_dict, + coordinate_system_camera_translated_uid, ): frame = uai_format.Frame( id=0, @@ -148,7 +166,7 @@ def test_to_raillabel( "streams": { coordinate_system_camera_translated_uid: sensor_camera_raillabel_dict, coordinate_system_lidar_translated_uid: sensor_lidar_raillabel_dict, - } + }, }, "objects": { str(bounding_box_2d_uai.object_id): { @@ -157,28 +175,19 @@ def test_to_raillabel( "cuboid": [bounding_box_3d_raillabel_dict], } }, - 
str(polygon_2d_uai.object_id): { - "object_data": { - "poly2d": [polygon_2d_raillabel_dict] - } - }, + str(polygon_2d_uai.object_id): {"object_data": {"poly2d": [polygon_2d_raillabel_dict]}}, str(polyline_2d_uai.object_id): { - "object_data": { - "poly2d": [polyline_2d_raillabel_dict] - } + "object_data": {"poly2d": [polyline_2d_raillabel_dict]} }, str(segmentation_3d_uai.object_id): { - "object_data": { - "vec": [segmentation_3d_raillabel_dict] - } + "object_data": {"vec": [segmentation_3d_raillabel_dict]} }, - } + }, } def test_warning_duplicate_annotation_id( - bounding_box_2d_uai_dict, polyline_2d_uai_dict, - sensor_lidar_uai_dict + bounding_box_2d_uai_dict, polyline_2d_uai_dict, sensor_lidar_uai_dict ): polyline_2d_uai_dict["id"] = bounding_box_2d_uai_dict["id"] @@ -191,7 +200,7 @@ def test_warning_duplicate_annotation_id( "2D_POLYGON": [], "3D_BOUNDING_BOX": [], "3D_SEGMENTATION": [], - } + }, } with _WarningsLogger() as logger: @@ -203,5 +212,6 @@ def test_warning_duplicate_annotation_id( if __name__ == "__main__": import os + os.system("clear") pytest.main([__file__, "--disable-pytest-warnings", "--cache-clear", "-vv"]) diff --git a/tests/test_raillabel_providerkit/format/understand_ai/test_uai_metadata.py b/tests/test_raillabel_providerkit/format/understand_ai/test_uai_metadata.py index e49ae76..8a220ca 100644 --- a/tests/test_raillabel_providerkit/format/understand_ai/test_uai_metadata.py +++ b/tests/test_raillabel_providerkit/format/understand_ai/test_uai_metadata.py @@ -6,6 +6,7 @@ # == Fixtures ========================= + @pytest.fixture def metadata_uai_dict() -> dict: return { @@ -19,6 +20,7 @@ def metadata_uai_dict() -> dict: "folder_name": "2021-09-22-14-28-01_2021-09-22-14-44-03", } + @pytest.fixture def metadata_uai(): return uai_format.Metadata( @@ -32,6 +34,7 @@ def metadata_uai(): folder_name="2021-09-22-14-28-01_2021-09-22-14-44-03", ) + @pytest.fixture def metadata_raillabel_dict(json_data) -> dict: return { @@ -39,12 +42,13 @@ def metadata_raillabel_dict(json_data) -> dict: "schema_version": "1.0.0", "name": "2021-09-22-14-28-01_2021-09-22-14-44-03", "subschema_version": json_data["raillabel_schema"]["version"], - "tagged_file": "2021-09-22-14-28-01_2021-09-22-14-44-03" + "tagged_file": "2021-09-22-14-28-01_2021-09-22-14-44-03", } # == Tests ============================ + def test_fromdict(): metadata = uai_format.Metadata.fromdict( { @@ -86,10 +90,12 @@ def test_to_raillabel(json_data): "schema_version": "1.0.0", "name": "2021-09-22-14-28-01_2021-09-22-14-44-03", "subschema_version": json_data["raillabel_schema"]["version"], - "tagged_file": "2021-09-22-14-28-01_2021-09-22-14-44-03" + "tagged_file": "2021-09-22-14-28-01_2021-09-22-14-44-03", } + if __name__ == "__main__": import os + os.system("clear") pytest.main([__file__, "--disable-pytest-warnings", "--cache-clear", "-v"]) diff --git a/tests/test_raillabel_providerkit/format/understand_ai/test_uai_point_3d.py b/tests/test_raillabel_providerkit/format/understand_ai/test_uai_point_3d.py index a234e22..5c116d2 100644 --- a/tests/test_raillabel_providerkit/format/understand_ai/test_uai_point_3d.py +++ b/tests/test_raillabel_providerkit/format/understand_ai/test_uai_point_3d.py @@ -6,6 +6,7 @@ # == Fixtures ========================= + @pytest.fixture def point_3d_uai_dict() -> dict: return { @@ -14,6 +15,7 @@ def point_3d_uai_dict() -> dict: "z": 2, } + @pytest.fixture def point_3d_uai() -> dict: return uai_format.Point3d( @@ -22,12 +24,15 @@ def point_3d_uai() -> dict: z=2, ) + @pytest.fixture def point_3d_vec() 
-> dict: return [0, 1, 2] + # == Tests ============================ + def test_fromdict(): point_3d = uai_format.Point3d.fromdict( { @@ -44,5 +49,6 @@ def test_fromdict(): if __name__ == "__main__": import os + os.system("clear") pytest.main([__file__, "--disable-pytest-warnings", "--cache-clear", "-v"]) diff --git a/tests/test_raillabel_providerkit/format/understand_ai/test_uai_polygon_2d.py b/tests/test_raillabel_providerkit/format/understand_ai/test_uai_polygon_2d.py index b004c82..9a5820c 100644 --- a/tests/test_raillabel_providerkit/format/understand_ai/test_uai_polygon_2d.py +++ b/tests/test_raillabel_providerkit/format/understand_ai/test_uai_polygon_2d.py @@ -9,6 +9,7 @@ # == Fixtures ========================= + @pytest.fixture def polygon_2d_uai_dict(sensor_camera_uai_dict, attributes_uai_dict) -> dict: return { @@ -17,14 +18,14 @@ def polygon_2d_uai_dict(sensor_camera_uai_dict, attributes_uai_dict) -> dict: "className": "test_class", "geometry": { "points": [ - [127.71153737657284, -0.3861000079676791], - [127.4762636010818, 328.04436391207815], + [127.71153737657284, -0.3861000079676791], + [127.4762636010818, 328.04436391207815], [115.77703250958459, 334.4789410124016], [115.01063176442402, 411.0810690770479], ] }, "attributes": attributes_uai_dict, - "sensor": sensor_camera_uai_dict + "sensor": sensor_camera_uai_dict, } @@ -35,8 +36,8 @@ def polygon_2d_uai(attributes_uai, sensor_camera_uai) -> dict: object_id=UUID("58e7edd8-a7ee-4775-a837-e6dd375e8150"), class_name="test_class", points=[ - (127.71153737657284, -0.3861000079676791), - (127.4762636010818, 328.04436391207815), + (127.71153737657284, -0.3861000079676791), + (127.4762636010818, 328.04436391207815), (115.77703250958459, 334.4789410124016), (115.01063176442402, 411.0810690770479), ], @@ -44,15 +45,22 @@ def polygon_2d_uai(attributes_uai, sensor_camera_uai) -> dict: sensor=sensor_camera_uai, ) + @pytest.fixture -def polygon_2d_raillabel_dict(attributes_raillabel_dict, coordinate_system_camera_translated_uid) -> dict: +def polygon_2d_raillabel_dict( + attributes_raillabel_dict, coordinate_system_camera_translated_uid +) -> dict: return { "name": "0f90cffa-2b6b-4e09-8fc2-527769a94e0a", "val": [ - 127.71153737657284, -0.3861000079676791, - 127.4762636010818, 328.04436391207815, - 115.77703250958459, 334.4789410124016, - 115.01063176442402, 411.0810690770479, + 127.71153737657284, + -0.3861000079676791, + 127.4762636010818, + 328.04436391207815, + 115.77703250958459, + 334.4789410124016, + 115.01063176442402, + 411.0810690770479, ], "mode": "MODE_POLY2D_ABSOLUTE", "closed": True, @@ -63,10 +71,8 @@ def polygon_2d_raillabel_dict(attributes_raillabel_dict, coordinate_system_camer # == Tests ============================ -def test_fromdict( - attributes_uai_dict, attributes_uai, - sensor_camera_uai_dict, sensor_camera_uai -): + +def test_fromdict(attributes_uai_dict, attributes_uai, sensor_camera_uai_dict, sensor_camera_uai): polygon_2d = uai_format.Polygon2d.fromdict( { "id": "0f90cffa-2b6b-4e09-8fc2-527769a94e0a", @@ -74,14 +80,14 @@ def test_fromdict( "className": "test_class", "geometry": { "points": [ - [127.71153737657284, -0.3861000079676791], - [127.4762636010818, 328.04436391207815], + [127.71153737657284, -0.3861000079676791], + [127.4762636010818, 328.04436391207815], [115.77703250958459, 334.4789410124016], [115.01063176442402, 411.0810690770479], ] }, "attributes": attributes_uai_dict, - "sensor": sensor_camera_uai_dict + "sensor": sensor_camera_uai_dict, } ) @@ -89,8 +95,8 @@ def test_fromdict( assert 
polygon_2d.object_id == UUID("58e7edd8-a7ee-4775-a837-e6dd375e8150") assert polygon_2d.class_name == "test_class" assert polygon_2d.points == [ - (127.71153737657284, -0.3861000079676791), - (127.4762636010818, 328.04436391207815), + (127.71153737657284, -0.3861000079676791), + (127.4762636010818, 328.04436391207815), (115.77703250958459, 334.4789410124016), (115.01063176442402, 411.0810690770479), ] @@ -99,16 +105,19 @@ def test_fromdict( def test_to_raillabel( - attributes_uai, attributes_raillabel_dict, - sensor_camera_uai, sensor_camera_raillabel_dict, coordinate_system_camera_translated_uid, + attributes_uai, + attributes_raillabel_dict, + sensor_camera_uai, + sensor_camera_raillabel_dict, + coordinate_system_camera_translated_uid, ): polygon_2d = uai_format.Polygon2d( id=UUID("0f90cffa-2b6b-4e09-8fc2-527769a94e0a"), object_id=UUID("58e7edd8-a7ee-4775-a837-e6dd375e8150"), class_name="test_class", points=[ - (127.71153737657284, -0.3861000079676791), - (127.4762636010818, 328.04436391207815), + (127.71153737657284, -0.3861000079676791), + (127.4762636010818, 328.04436391207815), (115.77703250958459, 334.4789410124016), (115.01063176442402, 411.0810690770479), ], @@ -121,10 +130,14 @@ def test_to_raillabel( assert data_dict == { "name": "0f90cffa-2b6b-4e09-8fc2-527769a94e0a", "val": [ - 127.71153737657284, -0.3861000079676791, - 127.4762636010818, 328.04436391207815, - 115.77703250958459, 334.4789410124016, - 115.01063176442402, 411.0810690770479, + 127.71153737657284, + -0.3861000079676791, + 127.4762636010818, + 328.04436391207815, + 115.77703250958459, + 334.4789410124016, + 115.01063176442402, + 411.0810690770479, ], "mode": "MODE_POLY2D_ABSOLUTE", "coordinate_system": coordinate_system_camera_translated_uid, @@ -135,7 +148,9 @@ def test_to_raillabel( assert translated_class_id == translate_class_id(polygon_2d.class_name) assert sensor_reference == sensor_camera_raillabel_dict + if __name__ == "__main__": import os + os.system("clear") pytest.main([__file__, "--disable-pytest-warnings", "--cache-clear", "-v"]) diff --git a/tests/test_raillabel_providerkit/format/understand_ai/test_uai_polyline_2d.py b/tests/test_raillabel_providerkit/format/understand_ai/test_uai_polyline_2d.py index 5857eb0..4f82706 100644 --- a/tests/test_raillabel_providerkit/format/understand_ai/test_uai_polyline_2d.py +++ b/tests/test_raillabel_providerkit/format/understand_ai/test_uai_polyline_2d.py @@ -9,6 +9,7 @@ # == Fixtures ========================= + @pytest.fixture def polyline_2d_uai_dict(sensor_camera_uai_dict, attributes_uai_dict) -> dict: return { @@ -17,14 +18,14 @@ def polyline_2d_uai_dict(sensor_camera_uai_dict, attributes_uai_dict) -> dict: "className": "test_class", "geometry": { "points": [ - [127.71153737657284, -0.3861000079676791], - [127.4762636010818, 328.04436391207815], + [127.71153737657284, -0.3861000079676791], + [127.4762636010818, 328.04436391207815], [115.77703250958459, 334.4789410124016], [115.01063176442402, 411.0810690770479], ] }, "attributes": attributes_uai_dict, - "sensor": sensor_camera_uai_dict + "sensor": sensor_camera_uai_dict, } @@ -35,8 +36,8 @@ def polyline_2d_uai(attributes_uai, sensor_camera_uai) -> dict: object_id=UUID("4d8eca35-6c1d-4159-8062-21c2f2c051df"), class_name="test_class", points=[ - (127.71153737657284, -0.3861000079676791), - (127.4762636010818, 328.04436391207815), + (127.71153737657284, -0.3861000079676791), + (127.4762636010818, 328.04436391207815), (115.77703250958459, 334.4789410124016), (115.01063176442402, 411.0810690770479), ], @@ -44,15 
+45,22 @@ def polyline_2d_uai(attributes_uai, sensor_camera_uai) -> dict: sensor=sensor_camera_uai, ) + @pytest.fixture -def polyline_2d_raillabel_dict(attributes_raillabel_dict, coordinate_system_camera_translated_uid) -> dict: +def polyline_2d_raillabel_dict( + attributes_raillabel_dict, coordinate_system_camera_translated_uid +) -> dict: return { "name": "7f2b99b7-61e4-4f9f-96e9-d3e9f583d7c2", "val": [ - 127.71153737657284, -0.3861000079676791, - 127.4762636010818, 328.04436391207815, - 115.77703250958459, 334.4789410124016, - 115.01063176442402, 411.0810690770479, + 127.71153737657284, + -0.3861000079676791, + 127.4762636010818, + 328.04436391207815, + 115.77703250958459, + 334.4789410124016, + 115.01063176442402, + 411.0810690770479, ], "mode": "MODE_POLY2D_ABSOLUTE", "closed": False, @@ -63,10 +71,8 @@ def polyline_2d_raillabel_dict(attributes_raillabel_dict, coordinate_system_came # == Tests ============================ -def test_fromdict( - attributes_uai_dict, attributes_uai, - sensor_camera_uai_dict, sensor_camera_uai -): + +def test_fromdict(attributes_uai_dict, attributes_uai, sensor_camera_uai_dict, sensor_camera_uai): polyline_2d = uai_format.Polyline2d.fromdict( { "id": "7f2b99b7-61e4-4f9f-96e9-d3e9f583d7c2", @@ -74,14 +80,14 @@ def test_fromdict( "className": "test_class", "geometry": { "points": [ - [127.71153737657284, -0.3861000079676791], - [127.4762636010818, 328.04436391207815], + [127.71153737657284, -0.3861000079676791], + [127.4762636010818, 328.04436391207815], [115.77703250958459, 334.4789410124016], [115.01063176442402, 411.0810690770479], ] }, "attributes": attributes_uai_dict, - "sensor": sensor_camera_uai_dict + "sensor": sensor_camera_uai_dict, } ) @@ -89,8 +95,8 @@ def test_fromdict( assert polyline_2d.object_id == UUID("4d8eca35-6c1d-4159-8062-21c2f2c051df") assert polyline_2d.class_name == "test_class" assert polyline_2d.points == [ - (127.71153737657284, -0.3861000079676791), - (127.4762636010818, 328.04436391207815), + (127.71153737657284, -0.3861000079676791), + (127.4762636010818, 328.04436391207815), (115.77703250958459, 334.4789410124016), (115.01063176442402, 411.0810690770479), ] @@ -99,16 +105,19 @@ def test_fromdict( def test_to_raillabel( - attributes_uai, attributes_raillabel_dict, - sensor_camera_uai, sensor_camera_raillabel_dict, coordinate_system_camera_translated_uid, + attributes_uai, + attributes_raillabel_dict, + sensor_camera_uai, + sensor_camera_raillabel_dict, + coordinate_system_camera_translated_uid, ): polyline_2d = uai_format.Polyline2d( id=UUID("7f2b99b7-61e4-4f9f-96e9-d3e9f583d7c2"), object_id=UUID("4d8eca35-6c1d-4159-8062-21c2f2c051df"), class_name="test_class", points=[ - (127.71153737657284, -0.3861000079676791), - (127.4762636010818, 328.04436391207815), + (127.71153737657284, -0.3861000079676791), + (127.4762636010818, 328.04436391207815), (115.77703250958459, 334.4789410124016), (115.01063176442402, 411.0810690770479), ], @@ -121,10 +130,14 @@ def test_to_raillabel( assert data_dict == { "name": "7f2b99b7-61e4-4f9f-96e9-d3e9f583d7c2", "val": [ - 127.71153737657284, -0.3861000079676791, - 127.4762636010818, 328.04436391207815, - 115.77703250958459, 334.4789410124016, - 115.01063176442402, 411.0810690770479, + 127.71153737657284, + -0.3861000079676791, + 127.4762636010818, + 328.04436391207815, + 115.77703250958459, + 334.4789410124016, + 115.01063176442402, + 411.0810690770479, ], "mode": "MODE_POLY2D_ABSOLUTE", "coordinate_system": coordinate_system_camera_translated_uid, @@ -135,7 +148,9 @@ def test_to_raillabel( assert 
translated_class_id == translate_class_id(polyline_2d.class_name) assert sensor_reference == sensor_camera_raillabel_dict + if __name__ == "__main__": import os + os.system("clear") pytest.main([__file__, "--disable-pytest-warnings", "--cache-clear", "-v"]) diff --git a/tests/test_raillabel_providerkit/format/understand_ai/test_uai_quaternion.py b/tests/test_raillabel_providerkit/format/understand_ai/test_uai_quaternion.py index 7ba5b4c..f83b700 100644 --- a/tests/test_raillabel_providerkit/format/understand_ai/test_uai_quaternion.py +++ b/tests/test_raillabel_providerkit/format/understand_ai/test_uai_quaternion.py @@ -6,6 +6,7 @@ # == Fixtures ========================= + @pytest.fixture def quaternion_uai_dict() -> dict: return { @@ -15,6 +16,7 @@ def quaternion_uai_dict() -> dict: "w": -0.61338551, } + @pytest.fixture def quaternion_uai() -> dict: return uai_format.Quaternion( @@ -24,12 +26,15 @@ def quaternion_uai() -> dict: w=-0.61338551, ) + @pytest.fixture def quaternion_vec() -> dict: - return [0.75318325, -0.10270147, 0.21430262, -0.61338551] + return [0.75318325, -0.10270147, 0.21430262, -0.61338551] + # == Tests ============================ + def test_fromdict(): quaternion = uai_format.Quaternion.fromdict( { @@ -48,5 +53,6 @@ def test_fromdict(): if __name__ == "__main__": import os + os.system("clear") pytest.main([__file__, "--disable-pytest-warnings", "--cache-clear", "-v"]) diff --git a/tests/test_raillabel_providerkit/format/understand_ai/test_uai_scene.py b/tests/test_raillabel_providerkit/format/understand_ai/test_uai_scene.py index bdbc825..ac24614 100644 --- a/tests/test_raillabel_providerkit/format/understand_ai/test_uai_scene.py +++ b/tests/test_raillabel_providerkit/format/understand_ai/test_uai_scene.py @@ -7,6 +7,7 @@ # == Fixtures ========================= + @pytest.fixture def scene_uai_dict( metadata_uai_dict, @@ -22,9 +23,10 @@ def scene_uai_dict( coordinate_system_lidar_uai_dict, coordinate_system_radar_uai_dict, ], - "frames": [frame_uai_dict] + "frames": [frame_uai_dict], } + @pytest.fixture def scene_uai( metadata_uai, @@ -40,28 +42,37 @@ def scene_uai( coordinate_system_lidar_uai.uid: coordinate_system_lidar_uai, coordinate_system_radar_uai.uid: coordinate_system_radar_uai, }, - frames={ - frame_uai.id: frame_uai - }, + frames={frame_uai.id: frame_uai}, ) + @pytest.fixture def scene_raillabel_dict( metadata_raillabel_dict, - coordinate_system_camera_uai, coordinate_system_camera_raillabel_dict, - coordinate_system_lidar_uai, coordinate_system_lidar_raillabel_dict, - coordinate_system_radar_uai, coordinate_system_radar_raillabel_dict, + coordinate_system_camera_uai, + coordinate_system_camera_raillabel_dict, + coordinate_system_lidar_uai, + coordinate_system_lidar_raillabel_dict, + coordinate_system_radar_uai, + coordinate_system_radar_raillabel_dict, ) -> dict: return + # == Tests ============================ + def test_fromdict( - metadata_uai_dict, metadata_uai, - coordinate_system_camera_uai_dict, coordinate_system_camera_uai, - coordinate_system_lidar_uai_dict, coordinate_system_lidar_uai, - coordinate_system_radar_uai_dict, coordinate_system_radar_uai, - frame_uai_dict, frame_uai, + metadata_uai_dict, + metadata_uai, + coordinate_system_camera_uai_dict, + coordinate_system_camera_uai, + coordinate_system_lidar_uai_dict, + coordinate_system_lidar_uai, + coordinate_system_radar_uai_dict, + coordinate_system_radar_uai, + frame_uai_dict, + frame_uai, ): scene = uai_format.Scene.fromdict( { @@ -71,7 +82,7 @@ def test_fromdict( 
coordinate_system_lidar_uai_dict, coordinate_system_radar_uai_dict, ], - "frames": [frame_uai_dict] + "frames": [frame_uai_dict], } ) @@ -81,9 +92,8 @@ def test_fromdict( coordinate_system_lidar_uai.uid: coordinate_system_lidar_uai, coordinate_system_radar_uai.uid: coordinate_system_radar_uai, } - assert scene.frames == { - frame_uai.id: frame_uai - } + assert scene.frames == {frame_uai.id: frame_uai} + def test_fromdict_duplicate_frame_id_warning( metadata_uai_dict, @@ -101,7 +111,7 @@ def test_fromdict_duplicate_frame_id_warning( coordinate_system_lidar_uai_dict, coordinate_system_radar_uai_dict, ], - "frames": [frame_uai_dict, frame_uai_dict] + "frames": [frame_uai_dict, frame_uai_dict], } ) @@ -119,11 +129,15 @@ def test_to_raillabel__metadata(metadata_uai, metadata_raillabel_dict): assert scene.to_raillabel()["openlabel"]["metadata"] == metadata_raillabel_dict + def test_to_raillabel__sensors( metadata_uai, - coordinate_system_camera_uai, coordinate_system_lidar_uai, - coordinate_system_camera_raillabel_dict, coordinate_system_lidar_raillabel_dict, - coordinate_system_camera_translated_uid, coordinate_system_lidar_translated_uid, + coordinate_system_camera_uai, + coordinate_system_lidar_uai, + coordinate_system_camera_raillabel_dict, + coordinate_system_lidar_raillabel_dict, + coordinate_system_camera_translated_uid, + coordinate_system_lidar_translated_uid, ): scene = uai_format.Scene( metadata=metadata_uai, @@ -140,8 +154,8 @@ def test_to_raillabel__sensors( "parent": "", "children": [ coordinate_system_camera_translated_uid, - coordinate_system_lidar_translated_uid - ] + coordinate_system_lidar_translated_uid, + ], }, coordinate_system_camera_translated_uid: coordinate_system_camera_raillabel_dict[0], coordinate_system_lidar_translated_uid: coordinate_system_lidar_raillabel_dict[0], @@ -151,10 +165,13 @@ def test_to_raillabel__sensors( coordinate_system_lidar_translated_uid: coordinate_system_lidar_raillabel_dict[1], } + def test_to_raillabel__frames( metadata_uai, - coordinate_system_camera_uai, coordinate_system_lidar_uai, - frame_uai, frame_raillabel_dict + coordinate_system_camera_uai, + coordinate_system_lidar_uai, + frame_uai, + frame_raillabel_dict, ): scene = uai_format.Scene( metadata=metadata_uai, @@ -162,17 +179,14 @@ def test_to_raillabel__frames( coordinate_system_camera_uai.uid: coordinate_system_camera_uai, coordinate_system_lidar_uai.uid: coordinate_system_lidar_uai, }, - frames={ - frame_uai.id: frame_uai - }, + frames={frame_uai.id: frame_uai}, ) - assert scene.to_raillabel()["openlabel"]["frames"] == { - str(frame_uai.id): frame_raillabel_dict - } + assert scene.to_raillabel()["openlabel"]["frames"] == {str(frame_uai.id): frame_raillabel_dict} if __name__ == "__main__": import os + os.system("clear") pytest.main([__file__, "--disable-pytest-warnings", "--cache-clear", "-vv"]) diff --git a/tests/test_raillabel_providerkit/format/understand_ai/test_uai_segmentation_3d.py b/tests/test_raillabel_providerkit/format/understand_ai/test_uai_segmentation_3d.py index 4371fac..000b255 100644 --- a/tests/test_raillabel_providerkit/format/understand_ai/test_uai_segmentation_3d.py +++ b/tests/test_raillabel_providerkit/format/understand_ai/test_uai_segmentation_3d.py @@ -9,6 +9,7 @@ # == Fixtures ========================= + @pytest.fixture def segmentation_3d_uai_dict(sensor_lidar_uai_dict, attributes_uai_dict) -> dict: return { @@ -17,12 +18,13 @@ def segmentation_3d_uai_dict(sensor_lidar_uai_dict, attributes_uai_dict) -> dict "className": "test_class", "geometry": { 
"associatedPoints": [39814, 39815, 39816, 39817, 39818], - "numberOfPointsInBox": 5 + "numberOfPointsInBox": 5, }, "attributes": attributes_uai_dict, "sensor": sensor_lidar_uai_dict, } + @pytest.fixture def segmentation_3d_uai(attributes_uai, sensor_lidar_uai) -> dict: return uai_format.Segmentation3d( @@ -35,21 +37,23 @@ def segmentation_3d_uai(attributes_uai, sensor_lidar_uai) -> dict: sensor=sensor_lidar_uai, ) + @pytest.fixture -def segmentation_3d_raillabel_dict(attributes_raillabel_dict, coordinate_system_lidar_translated_uid) -> dict: +def segmentation_3d_raillabel_dict( + attributes_raillabel_dict, coordinate_system_lidar_translated_uid +) -> dict: return { "name": "13478f94-d556-4f64-a72b-47662e94988e", "val": [39814, 39815, 39816, 39817, 39818], "coordinate_system": coordinate_system_lidar_translated_uid, - "attributes": attributes_raillabel_dict + "attributes": attributes_raillabel_dict, } + # == Tests ============================ -def test_fromdict( - attributes_uai_dict, attributes_uai, - sensor_lidar_uai_dict, sensor_lidar_uai -): + +def test_fromdict(attributes_uai_dict, attributes_uai, sensor_lidar_uai_dict, sensor_lidar_uai): segmentation_3d = uai_format.Segmentation3d.fromdict( { "id": "13478f94-d556-4f64-a72b-47662e94988e", @@ -57,7 +61,7 @@ def test_fromdict( "className": "test_class", "geometry": { "associatedPoints": [39814, 39815, 39816, 39817, 39818], - "numberOfPointsInBox": 5 + "numberOfPointsInBox": 5, }, "attributes": attributes_uai_dict, "sensor": sensor_lidar_uai_dict, @@ -74,8 +78,11 @@ def test_fromdict( def test_to_raillabel( - attributes_uai, attributes_raillabel_dict, - sensor_lidar_uai, sensor_lidar_raillabel_dict, coordinate_system_lidar_translated_uid, + attributes_uai, + attributes_raillabel_dict, + sensor_lidar_uai, + sensor_lidar_raillabel_dict, + coordinate_system_lidar_translated_uid, ): segmentation_3d = uai_format.Segmentation3d( id=UUID("13478f94-d556-4f64-a72b-47662e94988e"), @@ -99,7 +106,9 @@ def test_to_raillabel( assert translated_class_id == translate_class_id(segmentation_3d.class_name) assert sensor_reference == sensor_lidar_raillabel_dict + if __name__ == "__main__": import os + os.system("clear") pytest.main([__file__, "--disable-pytest-warnings", "--cache-clear", "-v"]) diff --git a/tests/test_raillabel_providerkit/format/understand_ai/test_uai_sensor_reference.py b/tests/test_raillabel_providerkit/format/understand_ai/test_uai_sensor_reference.py index 2b7af86..a5b9b6b 100644 --- a/tests/test_raillabel_providerkit/format/understand_ai/test_uai_sensor_reference.py +++ b/tests/test_raillabel_providerkit/format/understand_ai/test_uai_sensor_reference.py @@ -8,14 +8,16 @@ # == Fixtures ========================= + @pytest.fixture def sensor_camera_uai_dict() -> dict: return { "type": "ir_middle", "uri": "A0001781_image/000_1632321843.100464760.png", - "timestamp": "1632321843.100464760" + "timestamp": "1632321843.100464760", } + @pytest.fixture def sensor_camera_uai() -> dict: return uai_format.SensorReference( @@ -24,15 +26,12 @@ def sensor_camera_uai() -> dict: timestamp=Decimal("1632321843.100464760"), ) + @pytest.fixture def sensor_camera_raillabel_dict() -> dict: return { - "stream_properties": { - "sync": { - "timestamp": "1632321843.100464760" - } - }, - "uri": "000_1632321843.100464760.png" + "stream_properties": {"sync": {"timestamp": "1632321843.100464760"}}, + "uri": "000_1632321843.100464760.png", } @@ -41,9 +40,10 @@ def sensor_lidar_uai_dict() -> dict: return { "type": "LIDAR", "uri": 
"lidar_merged/000_1632321880.132833000.pcd", - "timestamp": "1632321880.132833000" + "timestamp": "1632321880.132833000", } + @pytest.fixture def sensor_lidar_uai() -> dict: return uai_format.SensorReference( @@ -52,25 +52,24 @@ def sensor_lidar_uai() -> dict: timestamp=Decimal("1632321880.132833000"), ) + @pytest.fixture def sensor_lidar_raillabel_dict() -> dict: return { - "stream_properties": { - "sync": { - "timestamp": "1632321880.132833000" - } - }, - "uri": "000_1632321880.132833000.pcd" + "stream_properties": {"sync": {"timestamp": "1632321880.132833000"}}, + "uri": "000_1632321880.132833000.pcd", } + # == Tests ============================ + def test_fromdict(): sensor_reference = uai_format.SensorReference.fromdict( { "type": "ir_middle", "uri": "A0001781_image/000_1632321843.100464760.png", - "timestamp": "1632321843.100464760" + "timestamp": "1632321843.100464760", } ) @@ -88,15 +87,13 @@ def test_to_raillabel(): assert sensor_reference.to_raillabel()[0] == "ir_middle" assert sensor_reference.to_raillabel()[1] == { - "stream_properties": { - "sync": { - "timestamp": "1632321843.100464760" - } - }, - "uri": "000_1632321843.100464760.png" + "stream_properties": {"sync": {"timestamp": "1632321843.100464760"}}, + "uri": "000_1632321843.100464760.png", } + if __name__ == "__main__": import os + os.system("clear") pytest.main([__file__, "--disable-pytest-warnings", "--cache-clear"]) diff --git a/tests/test_raillabel_providerkit/format/understand_ai/test_uai_size_3d.py b/tests/test_raillabel_providerkit/format/understand_ai/test_uai_size_3d.py index 1a5194c..66c6082 100644 --- a/tests/test_raillabel_providerkit/format/understand_ai/test_uai_size_3d.py +++ b/tests/test_raillabel_providerkit/format/understand_ai/test_uai_size_3d.py @@ -6,6 +6,7 @@ # == Fixtures ========================= + @pytest.fixture def size_3d_uai_dict() -> dict: return { @@ -14,6 +15,7 @@ def size_3d_uai_dict() -> dict: "height": 5, } + @pytest.fixture def size_3d_uai() -> dict: return uai_format.Size3d( @@ -22,12 +24,15 @@ def size_3d_uai() -> dict: height=5, ) + @pytest.fixture def size_3d_vec() -> dict: return [3, 4, 5] + # == Tests ============================ + def test_fromdict(): size_3d = uai_format.Size3d.fromdict( { @@ -44,5 +49,6 @@ def test_fromdict(): if __name__ == "__main__": import os + os.system("clear") pytest.main([__file__, "--disable-pytest-warnings", "--cache-clear", "-v"]) diff --git a/tests/test_raillabel_providerkit/validation/test_validate.py b/tests/test_raillabel_providerkit/validation/test_validate.py index 126024b..80740f0 100644 --- a/tests/test_raillabel_providerkit/validation/test_validate.py +++ b/tests/test_raillabel_providerkit/validation/test_validate.py @@ -7,9 +7,11 @@ # == Tests ============================ + def test_no_errors(demo_onthology, valid_onthology_scene): assert validate(valid_onthology_scene, demo_onthology) == [] + def test_onthology_errors(demo_onthology, invalid_onthology_scene): assert len(validate(invalid_onthology_scene, demo_onthology)) == 1 diff --git a/tests/test_raillabel_providerkit/validation/validate_empty_frame/test_validate_empty_frames.py b/tests/test_raillabel_providerkit/validation/validate_empty_frame/test_validate_empty_frames.py index b0c5274..7ca7f86 100644 --- a/tests/test_raillabel_providerkit/validation/validate_empty_frame/test_validate_empty_frames.py +++ b/tests/test_raillabel_providerkit/validation/validate_empty_frame/test_validate_empty_frames.py @@ -14,12 +14,14 @@ def test_is_frame_empty__true(): frame = 
raillabel.format.Frame(uid=0, annotations={}) assert _is_frame_empty(frame) + def test_is_frame_empty__false(empty_annotation, empty_frame): frame = empty_frame frame.annotations["581b0df1-c4cf-4a97-828e-13dd740defe5"] = empty_annotation assert not _is_frame_empty(frame) + def test_validate_empty_frames__no_error(default_frame, empty_scene): scene = empty_scene scene.frames = { @@ -30,6 +32,7 @@ def test_validate_empty_frames__no_error(default_frame, empty_scene): assert len(validate_empty_frames(scene)) == 0 + def test_validate_empty_frames__one_error(default_frame, empty_frame, empty_scene): scene = empty_scene scene.frames = { @@ -40,6 +43,7 @@ def test_validate_empty_frames__one_error(default_frame, empty_frame, empty_scen assert len(validate_empty_frames(scene)) == 1 + def test_validate_empty_frames__two_errors(default_frame, empty_frame, empty_scene): scene = empty_scene scene.frames = { @@ -50,6 +54,7 @@ def test_validate_empty_frames__two_errors(default_frame, empty_frame, empty_sce assert len(validate_empty_frames(scene)) == 2 + def test_validate_empty_frames__error_message_contains_indentifying_info(empty_frame, empty_scene): scene = empty_scene scene.frames = { diff --git a/tests/test_raillabel_providerkit/validation/validate_onthology/test_onthology_schema_v1.py b/tests/test_raillabel_providerkit/validation/validate_onthology/test_onthology_schema_v1.py index adaf511..8505e1c 100644 --- a/tests/test_raillabel_providerkit/validation/validate_onthology/test_onthology_schema_v1.py +++ b/tests/test_raillabel_providerkit/validation/validate_onthology/test_onthology_schema_v1.py @@ -11,6 +11,7 @@ # == Fixtures ========================= + @pytest.fixture def schema_path() -> Path: return ( @@ -21,16 +22,19 @@ def schema_path() -> Path: / "onthology_schema_v1.yaml" ) + @pytest.fixture def schema(schema_path) -> dict: with schema_path.open() as f: schema_data = yaml.safe_load(f) return schema_data + @pytest.fixture def validator(schema) -> jsonschema.Draft7Validator: return jsonschema.Draft7Validator(schema) + def schema_errors(data: dict, validator: jsonschema.Draft7Validator) -> t.List[str]: errors = [] @@ -39,8 +43,10 @@ def schema_errors(data: dict, validator: jsonschema.Draft7Validator) -> t.List[s return errors + # == Tests ========================= + def test_classes(validator): data = { "person": {}, @@ -49,12 +55,9 @@ def test_classes(validator): assert schema_errors(data, validator) == [] + def test_class_unsupported_field(validator): - data = { - "person": { - "UNSUPPORTED_FIELD": {} - } - } + data = {"person": {"UNSUPPORTED_FIELD": {}}} assert schema_errors(data, validator) == [ "$.person: Additional properties are not allowed ('UNSUPPORTED_FIELD' was unexpected)", @@ -62,58 +65,36 @@ def test_class_unsupported_field(validator): def test_attributes_field(validator): - data = { - "person": { - "attributes": {} - } - } + data = {"person": {"attributes": {}}} assert schema_errors(data, validator) == [] + def test_attribute_string(validator): - data = { - "person": { - "attributes": { - "name": "string" - } - } - } + data = {"person": {"attributes": {"name": "string"}}} assert schema_errors(data, validator) == [] + def test_attribute_integer(validator): - data = { - "person": { - "attributes": { - "number_of_fingers": "integer" - } - } - } + data = {"person": {"attributes": {"number_of_fingers": "integer"}}} assert schema_errors(data, validator) == [] + def test_attribute_boolean(validator): - data = { - "person": { - "attributes": { - "number_of_fingers": "boolean" - } - } - } 
+ data = {"person": {"attributes": {"number_of_fingers": "boolean"}}} assert schema_errors(data, validator) == [] + def test_attribute_single_select(validator): data = { "person": { "attributes": { "carrying": { "type": "single-select", - "options": [ - "groceries", - "a baby", - "the new Slicer-Dicer 3000 (WOW!)" - ] + "options": ["groceries", "a baby", "the new Slicer-Dicer 3000 (WOW!)"], } } } @@ -121,17 +102,14 @@ def test_attribute_single_select(validator): assert schema_errors(data, validator) == [] + def test_attribute_multi_select(validator): data = { "person": { "attributes": { "carrying": { "type": "multi-select", - "options": [ - "groceries", - "a baby", - "the new Slicer-Dicer 3000 (WOW!)" - ] + "options": ["groceries", "a baby", "the new Slicer-Dicer 3000 (WOW!)"], } } } @@ -139,14 +117,9 @@ def test_attribute_multi_select(validator): assert schema_errors(data, validator) == [] + def test_attribute_vector(validator): - data = { - "person": { - "attributes": { - "carrying": "vector" - } - } - } + data = {"person": {"attributes": {"carrying": "vector"}}} assert schema_errors(data, validator) == [] @@ -164,6 +137,7 @@ def test_sensor_types(validator): assert schema_errors(data, validator) == [] + def test_sensor_types_unsupported_type(validator): data = { "person": { @@ -176,15 +150,12 @@ def test_sensor_types_unsupported_type(validator): assert len(schema_errors(data, validator)) == 1 + def test_sensor_type_attributes(validator): data = { "person": { "sensor_types": { - "lidar": { - "attributes": { - "name": "string" - } - }, + "lidar": {"attributes": {"name": "string"}}, } } } diff --git a/tests/test_raillabel_providerkit/validation/validate_onthology/test_validate_onthology.py b/tests/test_raillabel_providerkit/validation/validate_onthology/test_validate_onthology.py index 926a26d..386fe63 100644 --- a/tests/test_raillabel_providerkit/validation/validate_onthology/test_validate_onthology.py +++ b/tests/test_raillabel_providerkit/validation/validate_onthology/test_validate_onthology.py @@ -12,13 +12,15 @@ # == Helpers ========================== + def make_dict_with_uids(objects: list) -> dict: return {obj.uid: obj for obj in objects} + def build_scene( sensors: t.List[raillabel.format.Sensor], objects: t.List[raillabel.format.Object], - annotations: t.List[t.Type[raillabel.format._ObjectAnnotation]] + annotations: t.List[t.Type[raillabel.format._ObjectAnnotation]], ) -> raillabel.Scene: if type(sensors) == list: sensors = make_dict_with_uids(sensors) @@ -27,14 +29,10 @@ def build_scene( metadata=raillabel.format.Metadata(schema_version="1.0.0"), sensors=sensors, objects=make_dict_with_uids(objects), - frames={ - 0: raillabel.format.Frame( - uid=0, - annotations=make_dict_with_uids(annotations) - ) - } + frames={0: raillabel.format.Frame(uid=0, annotations=make_dict_with_uids(annotations))}, ) + @pytest.fixture def sensors() -> t.List[raillabel.format.Sensor]: return { @@ -52,6 +50,7 @@ def sensors() -> t.List[raillabel.format.Sensor]: ), } + @pytest.fixture def object_person() -> raillabel.format.Object: return raillabel.format.Object( @@ -60,6 +59,7 @@ def object_person() -> raillabel.format.Object: type="person", ) + def build_object(type: str) -> raillabel.format.Object: return raillabel.format.Object( uid=uuid4, @@ -67,14 +67,15 @@ def build_object(type: str) -> raillabel.format.Object: type=type, ) + def build_annotation( object: raillabel.format.Object, - uid: str="a3f3abe5-082d-42ce-966c-bae9c6dae9d9", - sensor: raillabel.format.Sensor=raillabel.format.Sensor( + uid: str 
= "a3f3abe5-082d-42ce-966c-bae9c6dae9d9", + sensor: raillabel.format.Sensor = raillabel.format.Sensor( uid="rgb_middle", type=raillabel.format.SensorType.CAMERA, ), - attributes: dict={} + attributes: dict = {}, ) -> raillabel.format.Bbox: return raillabel.format.Bbox( uid=uid, @@ -85,12 +86,15 @@ def build_annotation( size=[], ) + # == Fixtures ========================= + @pytest.fixture def metadata(): return raillabel.format.Metadata(schema_version="1.0.0") + @pytest.fixture def demo_onthology() -> dict: return { @@ -98,34 +102,38 @@ def demo_onthology() -> dict: "train": {}, } + @pytest.fixture def valid_onthology_scene(metadata) -> raillabel.Scene: return raillabel.format.Scene( metadata=metadata, - objects=make_dict_with_uids([ - build_object("person"), - build_object("person"), - build_object("train"), - ]) + objects=make_dict_with_uids( + [ + build_object("person"), + build_object("person"), + build_object("train"), + ] + ), ) + @pytest.fixture def invalid_onthology_scene(metadata) -> raillabel.Scene: return raillabel.format.Scene( metadata=metadata, - objects=make_dict_with_uids([ - build_object("INVALID_CLASS"), - ]) + objects=make_dict_with_uids( + [ + build_object("INVALID_CLASS"), + ] + ), ) + # == Tests ============================ + def test_onthology_schema_invalid(): - onthology = { - "person": { - "INVALID_FIELD": {} - } - } + onthology = {"person": {"INVALID_FIELD": {}}} with pytest.raises(exceptions.OnthologySchemaError): validate_onthology(None, onthology) @@ -139,15 +147,18 @@ def test_valid_classes(metadata): scene = raillabel.format.Scene( metadata=metadata, - objects=make_dict_with_uids([ - build_object("person"), - build_object("person"), - build_object("train"), - ]) + objects=make_dict_with_uids( + [ + build_object("person"), + build_object("person"), + build_object("train"), + ] + ), ) assert validate_onthology(scene, onthology) == [] + def test_invalid_class(metadata): onthology = { "person": {}, @@ -156,30 +167,24 @@ def test_invalid_class(metadata): scene = raillabel.format.Scene( metadata=metadata, - objects=make_dict_with_uids([ - build_object("person"), - build_object("UNDEFINED_CLASS"), - ]) + objects=make_dict_with_uids( + [ + build_object("person"), + build_object("UNDEFINED_CLASS"), + ] + ), ) - assert validate_onthology(scene, onthology) == [ - "Object type 'UNDEFINED_CLASS' is not defined." - ] + assert validate_onthology(scene, onthology) == ["Object type 'UNDEFINED_CLASS' is not defined."] def test_undefined_attribute(sensors, object_person): onthology = { - "person": { - "attributes": {} - }, + "person": {"attributes": {}}, } annotation = build_annotation( - object=object_person, - sensor=sensors["lidar"], - attributes={ - "UNKNOWN_ATTRIBUTE": 10 - } + object=object_person, sensor=sensors["lidar"], attributes={"UNKNOWN_ATTRIBUTE": 10} ) scene = build_scene(sensors, [object_person], [annotation]) @@ -187,20 +192,13 @@ def test_undefined_attribute(sensors, object_person): f"Undefined attribute 'UNKNOWN_ATTRIBUTE' in annotation {annotation.uid}." 
] + def test_missing_attribute(sensors, object_person): onthology = { - "person": { - "attributes": { - "number_of_fingers": "integer" - } - }, + "person": {"attributes": {"number_of_fingers": "integer"}}, } - annotation = build_annotation( - object=object_person, - sensor=sensors["lidar"], - attributes={} - ) + annotation = build_annotation(object=object_person, sensor=sensors["lidar"], attributes={}) scene = build_scene(sensors, [object_person], [annotation]) assert validate_onthology(scene, onthology) == [ @@ -210,39 +208,26 @@ def test_missing_attribute(sensors, object_person): def test_valid_integer_attribute(sensors, object_person): onthology = { - "person": { - "attributes": { - "number_of_fingers": "integer" - } - }, + "person": {"attributes": {"number_of_fingers": "integer"}}, } annotation = build_annotation( - object=object_person, - sensor=sensors["lidar"], - attributes={ - "number_of_fingers": 10 - } + object=object_person, sensor=sensors["lidar"], attributes={"number_of_fingers": 10} ) scene = build_scene(sensors, [object_person], [annotation]) assert validate_onthology(scene, onthology) == [] + def test_false_integer_attribute_type(sensors, object_person): onthology = { - "person": { - "attributes": { - "number_of_fingers": "integer" - } - }, + "person": {"attributes": {"number_of_fingers": "integer"}}, } annotation = build_annotation( object=object_person, sensor=sensors["lidar"], - attributes={ - "number_of_fingers": "THIS SHOULD BE AN INTEGER" - } + attributes={"number_of_fingers": "THIS SHOULD BE AN INTEGER"}, ) scene = build_scene(sensors, [object_person], [annotation]) @@ -250,41 +235,27 @@ def test_false_integer_attribute_type(sensors, object_person): f"Attribute 'number_of_fingers' of annotation {annotation.uid} is of type 'str' (should be 'int')." ] + def test_valid_string_attribute(sensors, object_person): onthology = { - "person": { - "attributes": { - "first_name": "string" - } - }, + "person": {"attributes": {"first_name": "string"}}, } annotation = build_annotation( - object=object_person, - sensor=sensors["lidar"], - attributes={ - "first_name": "Gudrun" - } + object=object_person, sensor=sensors["lidar"], attributes={"first_name": "Gudrun"} ) scene = build_scene(sensors, [object_person], [annotation]) assert validate_onthology(scene, onthology) == [] + def test_false_string_attribute_type(sensors, object_person): onthology = { - "person": { - "attributes": { - "first_name": "string" - } - }, + "person": {"attributes": {"first_name": "string"}}, } annotation = build_annotation( - object=object_person, - sensor=sensors["lidar"], - attributes={ - "first_name": 42 - } + object=object_person, sensor=sensors["lidar"], attributes={"first_name": 42} ) scene = build_scene(sensors, [object_person], [annotation]) @@ -292,41 +263,29 @@ def test_false_string_attribute_type(sensors, object_person): f"Attribute 'first_name' of annotation {annotation.uid} is of type 'int' (should be 'str')." 
] + def test_valid_boolean_attribute(sensors, object_person): onthology = { - "person": { - "attributes": { - "has_cool_blue_shirt": "boolean" - } - }, + "person": {"attributes": {"has_cool_blue_shirt": "boolean"}}, } annotation = build_annotation( - object=object_person, - sensor=sensors["lidar"], - attributes={ - "has_cool_blue_shirt": False - } + object=object_person, sensor=sensors["lidar"], attributes={"has_cool_blue_shirt": False} ) scene = build_scene(sensors, [object_person], [annotation]) assert validate_onthology(scene, onthology) == [] + def test_false_boolean_attribute_type(sensors, object_person): onthology = { - "person": { - "attributes": { - "has_cool_blue_shirt": "boolean" - } - }, + "person": {"attributes": {"has_cool_blue_shirt": "boolean"}}, } annotation = build_annotation( object=object_person, sensor=sensors["lidar"], - attributes={ - "has_cool_blue_shirt": "NO THE SHIRT IS ORANGE ... AND THIS SHOULD BE A BOOL" - } + attributes={"has_cool_blue_shirt": "NO THE SHIRT IS ORANGE ... AND THIS SHOULD BE A BOOL"}, ) scene = build_scene(sensors, [object_person], [annotation]) @@ -334,33 +293,25 @@ def test_false_boolean_attribute_type(sensors, object_person): f"Attribute 'has_cool_blue_shirt' of annotation {annotation.uid} is of type 'str' (should be 'bool')." ] + def test_valid_vector_attribute(sensors, object_person): onthology = { - "person": { - "attributes": { - "favorite_pizzas": "vector" - } - }, + "person": {"attributes": {"favorite_pizzas": "vector"}}, } annotation = build_annotation( object=object_person, sensor=sensors["lidar"], - attributes={ - "favorite_pizzas": ["Diavolo", "Neapolitan", "Quattro Formaggi"] - } + attributes={"favorite_pizzas": ["Diavolo", "Neapolitan", "Quattro Formaggi"]}, ) scene = build_scene(sensors, [object_person], [annotation]) assert validate_onthology(scene, onthology) == [] + def test_false_vector_attribute_type(sensors, object_person): onthology = { - "person": { - "attributes": { - "favorite_pizzas": "vector" - } - }, + "person": {"attributes": {"favorite_pizzas": "vector"}}, } annotation = build_annotation( @@ -368,7 +319,7 @@ def test_false_vector_attribute_type(sensors, object_person): sensor=sensors["lidar"], attributes={ "favorite_pizzas": "does not like pizza (ikr)... THIS SHOULD BE A VECTOR AS WELL" - } + }, ) scene = build_scene(sensors, [object_person], [annotation]) @@ -376,6 +327,7 @@ def test_false_vector_attribute_type(sensors, object_person): f"Attribute 'favorite_pizzas' of annotation {annotation.uid} is of type 'str' (should be 'list')." 
] + def test_valid_single_select_attribute(sensors, object_person): onthology = { "person": { @@ -386,23 +338,20 @@ def test_valid_single_select_attribute(sensors, object_person): "groceries", "a baby", "the SlicerDicer 3000™ (wow!)", - ] + ], } } }, } annotation = build_annotation( - object=object_person, - sensor=sensors["lidar"], - attributes={ - "carries": "groceries" - } + object=object_person, sensor=sensors["lidar"], attributes={"carries": "groceries"} ) scene = build_scene(sensors, [object_person], [annotation]) assert validate_onthology(scene, onthology) == [] + def test_false_single_select_attribute_type(sensors, object_person): onthology = { "person": { @@ -413,18 +362,14 @@ def test_false_single_select_attribute_type(sensors, object_person): "groceries", "a baby", "the SlicerDicer 3000™ (wow!)", - ] + ], } } }, } annotation = build_annotation( - object=object_person, - sensor=sensors["lidar"], - attributes={ - "carries": False - } + object=object_person, sensor=sensors["lidar"], attributes={"carries": False} ) scene = build_scene(sensors, [object_person], [annotation]) @@ -432,6 +377,7 @@ def test_false_single_select_attribute_type(sensors, object_person): f"Attribute 'carries' of annotation {annotation.uid} is of type 'bool' (should be 'str')." ] + def test_single_select_attribute_undefined_option(sensors, object_person): onthology = { "person": { @@ -442,7 +388,7 @@ def test_single_select_attribute_undefined_option(sensors, object_person): "groceries", "a baby", "the SlicerDicer 3000™ (wow!)", - ] + ], } } }, @@ -451,9 +397,7 @@ def test_single_select_attribute_undefined_option(sensors, object_person): annotation = build_annotation( object=object_person, sensor=sensors["lidar"], - attributes={ - "carries": "something very unexpected" - } + attributes={"carries": "something very unexpected"}, ) scene = build_scene(sensors, [object_person], [annotation]) @@ -462,6 +406,7 @@ def test_single_select_attribute_undefined_option(sensors, object_person): + "'something very unexpected' (defined options: 'a baby', 'groceries', 'the SlicerDicer 3000™ (wow!)')." ] + def test_valid_multi_select_attribute(sensors, object_person): onthology = { "person": { @@ -472,7 +417,7 @@ def test_valid_multi_select_attribute(sensors, object_person): "groceries", "a baby", "the SlicerDicer 3000™ (wow!)", - ] + ], } } }, @@ -481,14 +426,13 @@ def test_valid_multi_select_attribute(sensors, object_person): annotation = build_annotation( object=object_person, sensor=sensors["lidar"], - attributes={ - "carries": ["groceries", "a baby"] - } + attributes={"carries": ["groceries", "a baby"]}, ) scene = build_scene(sensors, [object_person], [annotation]) assert validate_onthology(scene, onthology) == [] + def test_false_multi_select_attribute_type(sensors, object_person): onthology = { "person": { @@ -499,18 +443,14 @@ def test_false_multi_select_attribute_type(sensors, object_person): "groceries", "a baby", "the SlicerDicer 3000™ (wow!)", - ] + ], } } }, } annotation = build_annotation( - object=object_person, - sensor=sensors["lidar"], - attributes={ - "carries": "a baby" - } + object=object_person, sensor=sensors["lidar"], attributes={"carries": "a baby"} ) scene = build_scene(sensors, [object_person], [annotation]) @@ -518,6 +458,7 @@ def test_false_multi_select_attribute_type(sensors, object_person): f"Attribute 'carries' of annotation {annotation.uid} is of type 'str' (should be 'list')." 
] + def test_multi_select_attribute_undefined_option(sensors, object_person): onthology = { "person": { @@ -528,7 +469,7 @@ def test_multi_select_attribute_undefined_option(sensors, object_person): "groceries", "a baby", "the SlicerDicer 3000™ (wow!)", - ] + ], } } }, @@ -537,9 +478,7 @@ def test_multi_select_attribute_undefined_option(sensors, object_person): annotation = build_annotation( object=object_person, sensor=sensors["lidar"], - attributes={ - "carries": ["a baby", "something very unexpected"] - } + attributes={"carries": ["a baby", "something very unexpected"]}, ) scene = build_scene(sensors, [object_person], [annotation]) @@ -548,6 +487,7 @@ def test_multi_select_attribute_undefined_option(sensors, object_person): + "'something very unexpected' (defined options: 'a baby', 'groceries', 'the SlicerDicer 3000™ (wow!)')." ] + def test_multiple_attributes_valid(sensors, object_person): onthology = { "person": { @@ -560,8 +500,8 @@ def test_multiple_attributes_valid(sensors, object_person): "groceries", "a baby", "the SlicerDicer 3000™ (wow!)", - ] - } + ], + }, } } } @@ -573,12 +513,13 @@ def test_multiple_attributes_valid(sensors, object_person): "carries": "groceries", "number_of_fingers": 9, "first_name": "Brunhilde", - } + }, ) scene = build_scene(sensors, [object_person], [annotation]) assert validate_onthology(scene, onthology) == [] + def test_multiple_attributes_invalid(sensors, object_person): onthology = { "person": { @@ -591,8 +532,8 @@ def test_multiple_attributes_invalid(sensors, object_person): "groceries", "a baby", "the SlicerDicer 3000™ (wow!)", - ] - } + ], + }, } } } @@ -604,7 +545,7 @@ def test_multiple_attributes_invalid(sensors, object_person): "carries": "something very unexpected", "number_of_fingers": 9, "first_name": True, - } + }, ) scene = build_scene(sensors, [object_person], [annotation]) @@ -618,47 +559,24 @@ def test_multiple_attributes_invalid(sensors, object_person): def test_valid_sensor_type_attribute(sensors, object_person): onthology = { - "person": { - "sensor_types": { - "lidar": { - "attributes": { - "number_of_fingers": "integer" - } - } - } - }, + "person": {"sensor_types": {"lidar": {"attributes": {"number_of_fingers": "integer"}}}}, } annotation = build_annotation( - object=object_person, - sensor=sensors["lidar"], - attributes={ - "number_of_fingers": 10 - } + object=object_person, sensor=sensors["lidar"], attributes={"number_of_fingers": 10} ) scene = build_scene(sensors, [object_person], [annotation]) assert validate_onthology(scene, onthology) == [] + def test_invalid_sensor_type_attribute(sensors, object_person): onthology = { - "person": { - "sensor_types": { - "lidar": { - "attributes": { - "number_of_fingers": "integer" - } - } - } - }, + "person": {"sensor_types": {"lidar": {"attributes": {"number_of_fingers": "integer"}}}}, } annotation = build_annotation( - object=object_person, - sensor=sensors["lidar"], - attributes={ - "number_of_fingers": "None" - } + object=object_person, sensor=sensors["lidar"], attributes={"number_of_fingers": "None"} ) scene = build_scene(sensors, [object_person], [annotation]) @@ -670,16 +588,8 @@ def test_invalid_sensor_type_attribute(sensors, object_person): def test_valid_sensor_type_attributes_and_attributes(sensors, object_person): onthology = { "person": { - "attributes": { - "first_name": "string" - }, - "sensor_types": { - "lidar": { - "attributes": { - "number_of_fingers": "integer" - } - } - } + "attributes": {"first_name": "string"}, + "sensor_types": {"lidar": {"attributes": 
{"number_of_fingers": "integer"}}}, }, } @@ -689,25 +599,18 @@ def test_valid_sensor_type_attributes_and_attributes(sensors, object_person): attributes={ "number_of_fingers": 10, "first_name": "Brunhilde", - } + }, ) scene = build_scene(sensors, [object_person], [annotation]) assert validate_onthology(scene, onthology) == [] + def test_invalid_sensor_type_attributes_and_attributes(sensors, object_person): onthology = { "person": { - "attributes": { - "first_name": "string" - }, - "sensor_types": { - "lidar": { - "attributes": { - "number_of_fingers": "integer" - } - } - } + "attributes": {"first_name": "string"}, + "sensor_types": {"lidar": {"attributes": {"number_of_fingers": "integer"}}}, }, } @@ -716,7 +619,7 @@ def test_invalid_sensor_type_attributes_and_attributes(sensors, object_person): sensor=sensors["lidar"], attributes={ "first_name": "Brunhilde", - } + }, ) scene = build_scene(sensors, [object_person], [annotation])