From d1540dcd8fee8da1c269a89aef57dae28c824831 Mon Sep 17 00:00:00 2001
From: NicolasGensollen
Date: Mon, 27 May 2024 10:06:29 +0200
Subject: [PATCH] fix broken unit tests

---
 clinica/utils/caps.py                              | 21 +++++++++++--------
 clinica/utils/testing_utils.py                     |  5 +++++
 .../t1_linear/test_anat_linear_pipeline.py         |  4 ++--
 test/unittests/utils/test_caps.py                  |  3 ++-
 test/unittests/utils/test_utils_inputs.py          | 18 ++++++++++++++++
 5 files changed, 39 insertions(+), 12 deletions(-)

diff --git a/clinica/utils/caps.py b/clinica/utils/caps.py
index 428447eab..f3b15adf0 100644
--- a/clinica/utils/caps.py
+++ b/clinica/utils/caps.py
@@ -7,17 +7,13 @@
 from cattr.gen import make_dict_unstructure_fn, override
 from cattr.preconf.json import make_converter
 
-# from clinica.utils.input import DatasetType
 from clinica.utils.bids import BIDS_VERSION
+from clinica.utils.exceptions import ClinicaCAPSError
+from clinica.utils.inputs import DatasetType
 
 CAPS_VERSION = "1.0.0"
 
 
-class DatasetType(str, Enum):
-    RAW = "raw"
-    DERIVATIVE = "derivative"
-
-
 @define
 class CAPSDatasetDescription:
     """Model representing a CAPS dataset description."""
@@ -55,7 +51,7 @@ def from_file(cls, json_file: Path):
                 DatasetType(parsed["DatasetType"]),
             )
         except KeyError:
-            raise ValueError(
+            raise ClinicaCAPSError(
                 f"CAPS dataset_description.json file {json_file} is not valid and "
                 "cannot be parsed as a CAPSDatasetDescription. "
                 "Please verify that the file is well formatted."
@@ -103,17 +99,24 @@ def write_caps_dataset_description(
     caps_version: Optional[str] = None,
 ) -> None:
     """Write `dataset_description.json` at the root of the CAPS directory."""
+    from clinica.utils.stream import cprint
+
     new_desc = CAPSDatasetDescription.from_values(name, bids_version, caps_version)
     if (caps_dir / "dataset_description.json").exists():
-        print(f"The CAPS dataset already contains a dataset_description.json file.")
+        cprint(
+            f"The CAPS dataset {name} already contains a dataset_description.json file.",
+            lvl="info",
+        )
         previous_desc = CAPSDatasetDescription.from_file(
             caps_dir / "dataset_description.json"
         )
         if not previous_desc.is_compatible_with(new_desc):
-            raise ValueError(
+            msg = (
                 f"Impossible to write the dataset_description.json file in {caps_dir} "
                 "because it already exists and it contains incompatible metadata."
             )
+            cprint(msg, lvl="error")
+            raise ClinicaCAPSError(msg)
         if previous_desc.name != new_desc.name:
             new_desc.name = f"{previous_desc.name} + {new_desc.name}"
     with open(caps_dir / "dataset_description.json", "w") as f:
diff --git a/clinica/utils/testing_utils.py b/clinica/utils/testing_utils.py
index af0342dd1..e4d55f31d 100644
--- a/clinica/utils/testing_utils.py
+++ b/clinica/utils/testing_utils.py
@@ -72,6 +72,11 @@ def build_caps_directory(directory: os.PathLike, configuration: dict) -> None:
     This function is a simple prototype for creating fake datasets for testing.
     """
     directory = Path(directory)
+    with open(directory / "dataset_description.json", "w") as fp:
+        json.dump(
+            {"Name": "Example dataset", "BIDSVersion": "1.0.2", "CAPSVersion": "1.0.0"},
+            fp,
+        )
     _build_groups(directory, configuration)
     _build_subjects(directory, configuration)
 
diff --git a/test/unittests/pipelines/t1_linear/test_anat_linear_pipeline.py b/test/unittests/pipelines/t1_linear/test_anat_linear_pipeline.py
index 03c3ae3cb..ff3ca9947 100644
--- a/test/unittests/pipelines/t1_linear/test_anat_linear_pipeline.py
+++ b/test/unittests/pipelines/t1_linear/test_anat_linear_pipeline.py
@@ -23,7 +23,7 @@ def test_anat_linear_pipeline_single_bids_input_error(tmp_path):
     with pytest.raises(
         ClinicaBIDSError,
         match=re.escape(
-            f"The BIDS directory ({tmp_path}) you provided "
+            f"The raw directory ({tmp_path}) you provided "
             "is missing a dataset_description.json file."
         ),
     ):
@@ -37,7 +37,7 @@ def test_anat_linear_pipeline_single_caps_input_error(tmp_path):
     with pytest.raises(
         ClinicaCAPSError,
         match=re.escape(
-            f"The CAPS directory ({tmp_path}) you provided "
+            f"The derivative directory ({tmp_path}) you provided "
             "is missing a dataset_description.json file."
         ),
     ):
diff --git a/test/unittests/utils/test_caps.py b/test/unittests/utils/test_caps.py
index 33782d457..e0ee9b416 100644
--- a/test/unittests/utils/test_caps.py
+++ b/test/unittests/utils/test_caps.py
@@ -66,6 +66,7 @@ def test_write_caps_dataset_description_error(tmp_path):
         DatasetType,
         write_caps_dataset_description,
     )
+    from clinica.utils.exceptions import ClinicaCAPSError
 
     caps_dir = tmp_path / "caps"
     caps_dir.mkdir()
@@ -91,7 +92,7 @@
 
     # But re-writing a different description raises an error
     with pytest.raises(
-        ValueError,
+        ClinicaCAPSError,
         match=(
             f"Impossible to write the dataset_description.json file in {caps_dir} "
             "because it already exists and it contains incompatible metadata."
diff --git a/test/unittests/utils/test_utils_inputs.py b/test/unittests/utils/test_utils_inputs.py
index 655edc344..4659ebe02 100644
--- a/test/unittests/utils/test_utils_inputs.py
+++ b/test/unittests/utils/test_utils_inputs.py
@@ -1,3 +1,4 @@
+import json
 import os
 import re
 from pathlib import Path
@@ -354,6 +355,18 @@ def test_check_caps_folder(tmp_path):
 
     (tmp_path / "subjects").mkdir()
     (tmp_path / "subjects" / "foo.txt").mkdir()
+    with pytest.raises(
+        ClinicaCAPSError,
+        match=re.escape(
+            f"The derivative directory ({tmp_path}) you provided is missing a dataset_description.json file."
+        ),
+    ):
+        check_caps_folder(tmp_path)
+    with open(tmp_path / "dataset_description.json", "w") as fp:
+        json.dump(
+            {"Name": "Example dataset", "BIDSVersion": "1.0.2", "CAPSVersion": "1.0.0"},
+            fp,
+        )
     assert check_caps_folder(tmp_path) is None
     (tmp_path / "sub-01").mkdir()
     with pytest.raises(
@@ -709,6 +722,11 @@ def test_clinica_file_reader_dwi_dti(tmp_path):
         / "native_space"
     )
     dti_folder.mkdir(parents=True)
+    with open(tmp_path / "dataset_description.json", "w") as fp:
+        json.dump(
+            {"Name": "Example dataset", "BIDSVersion": "1.0.2", "CAPSVersion": "1.0.0"},
+            fp,
+        )
     for measure in DTIBasedMeasure:
         (dti_folder / f"sub-01_ses-M000_space-T1w_{measure.value}.nii.gz").touch()
     query = dwi_dti("FA", space="T1w")