Commit da46988: unittests

thibaultdvx committed Sep 19, 2024
1 parent cf4edb6 commit da46988
Showing 9 changed files with 510 additions and 0 deletions.
60 changes: 60 additions & 0 deletions tests/unittests/monai_metrics/config/test_classification.py
@@ -0,0 +1,60 @@
import pytest
from pydantic import ValidationError

from clinicadl.monai_metrics.config.classification import (
    ROCAUCConfig,
    create_confusion_matrix_config,
)
from clinicadl.monai_metrics.config.enum import ConfusionMatrixMetric


# ROCAUC
def test_fails_validations_rocauc():
    with pytest.raises(ValidationError):
        ROCAUCConfig(average="abc")


def test_ROCAUCConfig():
    config = ROCAUCConfig(
        average="macro",
    )
    assert config.metric == "ROCAUCMetric"
    assert config.average == "macro"


# Confusion Matrix
@pytest.mark.parametrize(
    "bad_inputs",
    [
        {"reduction": "abc"},
        {"get_not_nans": True},
    ],
)
def test_fails_validations_cmatrix(bad_inputs):
    for m in ConfusionMatrixMetric:
        config_class = create_confusion_matrix_config(m.value)
        with pytest.raises(ValidationError):
            config_class(**bad_inputs)


def test_passes_validations_cmatrix():
    for m in ConfusionMatrixMetric:
        config_class = create_confusion_matrix_config(m.value)
        config_class(
            reduction="mean",
            get_not_nans=False,
            compute_sample=False,
        )


def test_ConfusionMatrixMetricConfig():
    for m in ConfusionMatrixMetric:
        config_class = create_confusion_matrix_config(m.value)
        config = config_class(
            reduction="sum",
        )
        assert config.metric == "ConfusionMatrixMetric"
        assert config.reduction == "sum"
        assert config.metric_name == m.value
        assert config.include_background == "DefaultFromLibrary"
        assert not config.get_not_nans
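
A minimal usage sketch of the behaviour these tests pin down — not part of the commit, and assuming the configs are pydantic models, as the ValidationError checks imply:

    from pydantic import ValidationError

    from clinicadl.monai_metrics.config.classification import (
        ROCAUCConfig,
        create_confusion_matrix_config,
    )

    # A bad averaging mode is rejected at construction time.
    try:
        ROCAUCConfig(average="abc")
    except ValidationError as err:
        print(err)

    # The factory returns a distinct config class per confusion-matrix metric.
    config_class = create_confusion_matrix_config("f1 score")
    config = config_class(reduction="sum")
    print(config.metric)       # "ConfusionMatrixMetric", per the assertions above
    print(config.metric_name)  # "f1 score"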
37 changes: 37 additions & 0 deletions tests/unittests/monai_metrics/config/test_factory.py
@@ -0,0 +1,37 @@
import pytest

from clinicadl.monai_metrics.config import ImplementedMetrics, create_metric_config


def test_create_metric_config():
    for metric in [e.value for e in ImplementedMetrics]:
        if metric == "Loss":
            with pytest.raises(ValueError):
                create_metric_config(metric)
        else:
            create_metric_config(metric)

    config_class = create_metric_config("Hausdorff distance")
    config = config_class(
        include_background=True,
        distance_metric="taxicab",
        reduction="sum",
        percentile=50,
    )
    assert config.metric == "HausdorffDistanceMetric"
    assert config.include_background
    assert config.distance_metric == "taxicab"
    assert config.reduction == "sum"
    assert config.percentile == 50
    assert config.directed == "DefaultFromLibrary"
    assert not config.get_not_nans

    config_class = create_metric_config("F1 score")
    config = config_class(
        include_background=True,
        compute_sample=True,
    )
    assert config.metric == "ConfusionMatrixMetric"
    assert config.include_background
    assert config.compute_sample
    assert config.metric_name == "f1 score"
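
One detail worth flagging in this file: fields left unset compare equal to the string sentinel "DefaultFromLibrary" rather than None, so the wrapper can defer to MONAI's own defaults. A short sketch of the two-step pattern (class first, then instance), assuming no Hausdorff field other than those shown is required:

    from clinicadl.monai_metrics.config import create_metric_config

    # Step 1: resolve a user-facing name to a pydantic config class.
    config_class = create_metric_config("Hausdorff distance")

    # Step 2: instantiate it; anything left unset keeps the sentinel value,
    # meaning "use whatever default the MONAI class ships with".
    config = config_class(percentile=50)
    assert config.percentile == 50
    assert config.directed == "DefaultFromLibrary"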
17 changes: 17 additions & 0 deletions tests/unittests/monai_metrics/config/test_generation.py
@@ -0,0 +1,17 @@
import pytest
from pydantic import ValidationError

from clinicadl.monai_metrics.config.generation import MMDMetricConfig


def test_fails_validation():
    with pytest.raises(ValidationError):
        MMDMetricConfig(kernel_bandwidth=0)


def test_MMDMetricConfig():
    config = MMDMetricConfig(
        kernel_bandwidth=2.0,
    )
    assert config.metric == "MMDMetric"
    assert config.kernel_bandwidth == 2.0
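
Taken together, the two tests pin down the constraint: kernel_bandwidth must be strictly positive. A boundary sketch with illustrative values, using the same API as the tests:

    from pydantic import ValidationError

    from clinicadl.monai_metrics.config.generation import MMDMetricConfig

    MMDMetricConfig(kernel_bandwidth=2.0)  # accepted: strictly positive

    try:
        MMDMetricConfig(kernel_bandwidth=0)  # rejected at the boundary
    except ValidationError:
        print("kernel_bandwidth must be > 0")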
121 changes: 121 additions & 0 deletions tests/unittests/monai_metrics/config/test_reconstruction.py
@@ -0,0 +1,121 @@
import pytest
from pydantic import ValidationError

from clinicadl.monai_metrics.config.reconstruction import (
    MultiScaleSSIMConfig,
    PSNRConfig,
    SSIMConfig,
)


# PSNR #
@pytest.mark.parametrize(
    "bad_inputs",
    [
        {"max_val": 255, "reduction": "abc"},
        {"max_val": 255, "get_not_nans": True},
        {"max_val": 0},
    ],
)
def test_fails_validation_psnr(bad_inputs):
    with pytest.raises(ValidationError):
        PSNRConfig(**bad_inputs)


@pytest.mark.parametrize(
    "good_inputs",
    [
        {"max_val": 255, "reduction": "sum"},
        {"max_val": 255, "reduction": "mean"},
        {"max_val": 255, "get_not_nans": False},
    ],
)
def test_passes_validations_psnr(good_inputs):
    PSNRConfig(**good_inputs)


def test_PSNRConfig():
    config = PSNRConfig(
        max_val=7,
        reduction="sum",
    )
    assert config.metric == "PSNRMetric"
    assert config.max_val == 7
    assert config.reduction == "sum"
    assert not config.get_not_nans


# SSIM #
@pytest.mark.parametrize(
    "bad_inputs",
    [
        {"spatial_dims": 1},
        {"spatial_dims": 2, "data_range": 0},
        {"spatial_dims": 2, "kernel_type": "abc"},
        {"spatial_dims": 2, "win_size": 0},
        {"spatial_dims": 2, "win_size": (1, 2, 3)},
        {"spatial_dims": 2, "kernel_sigma": 0},
        {"spatial_dims": 2, "kernel_sigma": (1.0, 2.0, 3.0)},
        {"spatial_dims": 2, "k1": -1.0},
        {"spatial_dims": 2, "k2": -0.01},
    ],
)
def test_fails_validations(bad_inputs):
    with pytest.raises(ValidationError):
        SSIMConfig(**bad_inputs)
    with pytest.raises(ValidationError):
        MultiScaleSSIMConfig(**bad_inputs)


def test_fails_validation_msssim():
    with pytest.raises(ValidationError):
        MultiScaleSSIMConfig(spatial_dims=2, weights=(0.0, 1.0))
    with pytest.raises(ValidationError):
        MultiScaleSSIMConfig(spatial_dims=2, weights=1.0)


@pytest.mark.parametrize(
    "good_inputs",
    [
        {
            "spatial_dims": 2,
            "data_range": 1,
            "kernel_type": "gaussian",
            "win_size": 10,
            "kernel_sigma": 1.0,
            "k1": 1.0,
            "k2": 1.0,
            "weights": [1.0, 2.0],
        },
        {"spatial_dims": 2, "win_size": (1, 2), "kernel_sigma": (1.0, 2.0)},
    ],
)
def test_passes_validations(good_inputs):
    MultiScaleSSIMConfig(**good_inputs)
    SSIMConfig(**good_inputs)


def test_SSIMConfig():
    config = SSIMConfig(
        spatial_dims=2,
        reduction="sum",
        k1=1.0,
    )
    assert config.metric == "SSIMMetric"
    assert config.reduction == "sum"
    assert config.spatial_dims == 2
    assert config.k1 == 1.0
    assert config.k2 == "DefaultFromLibrary"


def test_MultiScaleSSIMMetric():
    config = MultiScaleSSIMConfig(
        spatial_dims=2, reduction="sum", k1=1.0, weights=[1.0], win_size=10
    )
    assert config.metric == "MultiScaleSSIMMetric"
    assert config.reduction == "sum"
    assert config.spatial_dims == 2
    assert config.win_size == 10
    assert config.k1 == 1.0
    assert config.k2 == "DefaultFromLibrary"
    assert config.weights == (1.0,)
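
Two behaviours worth calling out from this file: per-dimension parameters (win_size, kernel_sigma) must match spatial_dims in length when passed as tuples, and weights is coerced from a list to a tuple. A sketch with illustrative values, using the same imports as the tests:

    from clinicadl.monai_metrics.config.reconstruction import MultiScaleSSIMConfig

    # Length-2 tuples match spatial_dims=2 and are accepted, where the
    # length-3 tuples in the bad_inputs above raise a ValidationError.
    config = MultiScaleSSIMConfig(
        spatial_dims=2,
        win_size=(1, 2),
        kernel_sigma=(1.0, 2.0),
        weights=[1.0, 2.0],
    )
    assert config.weights == (1.0, 2.0)  # list in, tuple out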
65 changes: 65 additions & 0 deletions tests/unittests/monai_metrics/config/test_regression.py
@@ -0,0 +1,65 @@
import pytest
from pydantic import ValidationError

from clinicadl.monai_metrics.config.regression import (
    MAEConfig,
    MSEConfig,
    RMSEConfig,
)


@pytest.mark.parametrize(
    "bad_inputs",
    [
        {"reduction": "abc"},
        {"get_not_nans": True},
    ],
)
def test_fails_validations(bad_inputs):
    with pytest.raises(ValidationError):
        MAEConfig(**bad_inputs)
    with pytest.raises(ValidationError):
        MSEConfig(**bad_inputs)
    with pytest.raises(ValidationError):
        RMSEConfig(**bad_inputs)


@pytest.mark.parametrize(
    "good_inputs",
    [
        {"reduction": "sum"},
        {"reduction": "mean"},
        {"get_not_nans": False},
    ],
)
def test_passes_validations(good_inputs):
    MAEConfig(**good_inputs)
    MSEConfig(**good_inputs)
    RMSEConfig(**good_inputs)


def test_MAEConfig():
    config = MAEConfig(
        reduction="sum",
    )
    assert config.metric == "MAEMetric"
    assert config.reduction == "sum"
    assert not config.get_not_nans


def test_MSEConfig():
    config = MSEConfig(
        reduction="sum",
    )
    assert config.metric == "MSEMetric"
    assert config.reduction == "sum"
    assert not config.get_not_nans


def test_RMSEConfig():
    config = RMSEConfig(
        reduction="sum",
    )
    assert config.metric == "RMSEMetric"
    assert config.reduction == "sum"
    assert not config.get_not_nans
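
Since MAEConfig, MSEConfig and RMSEConfig expose the same reduction/get_not_nans interface and differ only in the metric they name, the three per-class tests could also be read as one loop; a sketch of that pattern:

    from clinicadl.monai_metrics.config.regression import MAEConfig, MSEConfig, RMSEConfig

    for config_class, expected in [
        (MAEConfig, "MAEMetric"),
        (MSEConfig, "MSEMetric"),
        (RMSEConfig, "RMSEMetric"),
    ]:
        config = config_class(reduction="sum")
        assert config.metric == expected  # only the metric name differs
        assert not config.get_not_nans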