change preprocessing config
camillebrianceau committed Oct 23, 2024
1 parent 2d5a46f commit 580f249
Showing 10 changed files with 158 additions and 134 deletions.
36 changes: 0 additions & 36 deletions clinicadl/caps_dataset/caps_dataset_config.py
@@ -89,39 +89,3 @@ def from_preprocessing_and_extraction_method(
extraction=get_extraction(ExtractionMethod(extraction))(**kwargs),
transforms=TransformsConfig(**kwargs),
)

def compute_folder_and_file_type(
self, from_bids: Optional[Path] = None
) -> Tuple[str, FileType]:
preprocessing = self.preprocessing.preprocessing
if from_bids is not None:
if isinstance(self.preprocessing, CustomPreprocessingConfig):
mod_subfolder = Preprocessing.CUSTOM.value
file_type = FileType(
pattern=f"*{self.preprocessing.custom_suffix}",
description="Custom suffix",
)
else:
mod_subfolder = preprocessing
file_type = bids_nii(self.preprocessing)

elif preprocessing not in Preprocessing:
raise NotImplementedError(
f"Extraction of preprocessing {preprocessing} is not implemented from CAPS directory."
)
else:
mod_subfolder = preprocessing.value.replace("-", "_")
if isinstance(self.preprocessing, T1PreprocessingConfig) or isinstance(
self.preprocessing, FlairPreprocessingConfig
):
file_type = linear_nii(self.preprocessing)
elif isinstance(self.preprocessing, PETPreprocessingConfig):
file_type = pet_linear_nii(self.preprocessing)
elif isinstance(self.preprocessing, DTIPreprocessingConfig):
file_type = dwi_dti(self.preprocessing)
elif isinstance(self.preprocessing, CustomPreprocessingConfig):
file_type = FileType(
pattern=f"*{self.preprocessing.custom_suffix}",
description="Custom suffix",
)
return mod_subfolder, file_type
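The folder/file-type logic removed here now lives on the preprocessing configs themselves (see clinicadl/caps_dataset/preprocessing/config.py further down). A minimal usage sketch of the new call path, assuming only the defaults shown in that new file:

# Sketch only: relies on the new PreprocessingConfig.compute_folder_and_file_type()
# added in this commit; printed values depend on the enum definitions in
# clinicadl.utils.enum.
from clinicadl.caps_dataset.preprocessing.config import T1PreprocessingConfig

config = T1PreprocessingConfig()  # preprocessing defaults to Preprocessing.T1_LINEAR
folder, file_type = config.compute_folder_and_file_type()
print(folder)             # preprocessing value with "-" replaced by "_", e.g. "t1_linear"
print(file_type.pattern)  # CAPS pattern built by linear_nii()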
80 changes: 2 additions & 78 deletions clinicadl/caps_dataset/caps_dataset_utils.py
@@ -1,82 +1,6 @@
import json
from pathlib import Path
from typing import Any, Dict, Optional, Tuple

from clinicadl.caps_dataset.caps_dataset_config import CapsDatasetConfig
from clinicadl.caps_dataset.preprocessing.config import (
CustomPreprocessingConfig,
DTIPreprocessingConfig,
FlairPreprocessingConfig,
PETPreprocessingConfig,
T1PreprocessingConfig,
)
from clinicadl.caps_dataset.preprocessing.utils import (
bids_nii,
dwi_dti,
linear_nii,
pet_linear_nii,
)
from clinicadl.utils.enum import Preprocessing
from clinicadl.utils.exceptions import ClinicaDLArgumentError
from clinicadl.utils.iotools.clinica_utils import FileType


def compute_folder_and_file_type(
config: CapsDatasetConfig, from_bids: Optional[Path] = None
) -> Tuple[str, FileType]:
preprocessing = config.preprocessing.preprocessing
if from_bids is not None:
if isinstance(config.preprocessing, CustomPreprocessingConfig):
mod_subfolder = Preprocessing.CUSTOM.value
file_type = FileType(
pattern=f"*{config.preprocessing.custom_suffix}",
description="Custom suffix",
)
else:
mod_subfolder = preprocessing
file_type = bids_nii(config.preprocessing)

elif preprocessing not in Preprocessing:
raise NotImplementedError(
f"Extraction of preprocessing {preprocessing} is not implemented from CAPS directory."
)
else:
mod_subfolder = preprocessing.value.replace("-", "_")
if isinstance(config.preprocessing, T1PreprocessingConfig) or isinstance(
config.preprocessing, FlairPreprocessingConfig
):
file_type = linear_nii(config.preprocessing)
elif isinstance(config.preprocessing, PETPreprocessingConfig):
file_type = pet_linear_nii(config.preprocessing)
elif isinstance(config.preprocessing, DTIPreprocessingConfig):
file_type = dwi_dti(config.preprocessing)
elif isinstance(config.preprocessing, CustomPreprocessingConfig):
file_type = FileType(
pattern=f"*{config.preprocessing.custom_suffix}",
description="Custom suffix",
)
return mod_subfolder, file_type


def find_file_type(config: CapsDatasetConfig) -> FileType:
if isinstance(config.preprocessing, T1PreprocessingConfig):
file_type = linear_nii(config.preprocessing)
elif isinstance(config.preprocessing, PETPreprocessingConfig):
if (
config.preprocessing.tracer is None
or config.preprocessing.suvr_reference_region is None
):
raise ClinicaDLArgumentError(
"`tracer` and `suvr_reference_region` must be defined "
"when using `pet-linear` preprocessing."
)
file_type = pet_linear_nii(config.preprocessing)
else:
raise NotImplementedError(
f"Generation of synthetic data is not implemented for preprocessing {config.preprocessing.preprocessing.value}"
)

return file_type
from typing import Any, Dict


def read_json(json_path: Path) -> Dict[str, Any]:
@@ -187,7 +111,7 @@ def read_json(json_path: Path) -> Dict[str, Any]:
**parameters,
)
if "file_type" not in parameters["preprocessing_dict"]:
_, file_type = compute_folder_and_file_type(config)
file_type = config.preprocessing.get_filetype()
parameters["preprocessing_dict"]["file_type"] = file_type.model_dump()

return parameters
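The removed find_file_type helper is superseded by get_filetype() on the preprocessing config itself. A hedged sketch of the new call pattern, using the PET defaults defined in the new preprocessing/config.py below:

# Sketch of the call that replaces find_file_type(config), assuming the
# PETPreprocessingConfig defaults shown in this commit (tracer=FFDG,
# suvr_reference_region=CEREBELLUMPONS2).
from clinicadl.caps_dataset.preprocessing.config import PETPreprocessingConfig

pet_config = PETPreprocessingConfig()
file_type = pet_config.get_filetype()
print(file_type.pattern)          # pet_linear/*_trc-<tracer>_space-MNI152NLin2009cSym_desc-Crop_res-1x1x1_suvr-<region>_pet.nii.gz
print(file_type.needed_pipeline)  # "pet-linear"

As defined here, the PET config no longer performs the explicit ClinicaDLArgumentError check that find_file_type did; tracer and suvr_reference_region now carry defaults instead.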
10 changes: 5 additions & 5 deletions clinicadl/caps_dataset/data.py
@@ -134,7 +134,7 @@ def _get_image_path(self, participant: str, session: str, cohort: str) -> Path:

# Try to find .nii.gz file
try:
folder, file_type = self.config.compute_folder_and_file_type()
folder, file_type = self.config.preprocessing.compute_folder_and_file_type()

results = clinicadl_file_reader(
[participant],
@@ -158,7 +158,7 @@ def _get_image_path(self, participant: str, session: str, cohort: str) -> Path:
image_path = image_dir / image_filename
# Try to find .pt file
except ClinicaDLCAPSError:
folder, file_type = self.config.compute_folder_and_file_type()
folder, file_type = self.config.preprocessing.compute_folder_and_file_type()
file_type.pattern = file_type.pattern.replace(".nii.gz", ".pt")
results = clinicadl_file_reader(
[participant],
@@ -220,9 +220,9 @@ def _get_full_image(self) -> torch.Tensor:

from clinicadl.utils.iotools.clinica_utils import clinicadl_file_reader

participant_id = self.df.loc[0, "participant_id"]
session_id = self.df.loc[0, "session_id"]
cohort = self.df.loc[0, "cohort"]
participant_id = self.df.at[0, "participant_id"]
session_id = self.df.at[0, "session_id"]
cohort = self.df.at[0, "cohort"]

try:
image_path = self._get_image_path(participant_id, session_id, cohort)
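The .pt fallback above only rewrites the suffix of the resolved FileType pattern. A small illustrative sketch, with a hypothetical pattern value:

# Illustration of the tensor fallback in _get_image_path: the resolved FileType
# is reused with its NIfTI suffix swapped for the .pt extension.
from clinicadl.utils.iotools.clinica_utils import FileType

file_type = FileType(
    pattern="*space-MNI152NLin2009cSym_desc-Crop_res-1x1x1_T1w.nii.gz",  # example pattern
    description="T1w image registered to MNI space",
)
file_type.pattern = file_type.pattern.replace(".nii.gz", ".pt")
print(file_type.pattern)  # *space-MNI152NLin2009cSym_desc-Crop_res-1x1x1_T1w.pt

The switch from df.loc to df.at in _get_full_image uses pandas' scalar accessor, the idiomatic (and faster) way to read a single cell by label.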
145 changes: 143 additions & 2 deletions clinicadl/caps_dataset/preprocessing/config.py
@@ -1,16 +1,19 @@
import abc
from logging import getLogger
from pathlib import Path
from typing import Optional
from typing import Optional, Tuple

from pydantic import BaseModel, ConfigDict

from clinicadl.utils.enum import (
DTIMeasure,
DTISpace,
LinearModality,
Preprocessing,
SUVRReferenceRegions,
Tracer,
)
from clinicadl.utils.iotools.clinica_utils import FileType

logger = getLogger("clinicadl.modality_config")

@@ -22,36 +25,174 @@ class PreprocessingConfig(BaseModel):

tsv_file: Optional[Path] = None
preprocessing: Preprocessing
file_type: Optional[FileType] = None
use_uncropped_image: bool = False

# pydantic config
model_config = ConfigDict(validate_assignment=True)
model_config = ConfigDict(validate_assignment=True, arbitrary_types_allowed=True)

@abc.abstractmethod
def bids_nii(self, reconstruction: Optional[str] = None) -> FileType:
pass

@abc.abstractmethod
def caps_nii(self) -> tuple:
pass

@abc.abstractmethod
def get_filetype(self) -> FileType:
pass

def compute_folder_and_file_type(
self, from_bids: Optional[Path] = None
) -> Tuple[str, FileType]:
if from_bids is not None:
mod_subfolder = self.preprocessing.value
file_type = self.bids_nii()

elif self.preprocessing not in Preprocessing:
raise NotImplementedError(
f"Extraction of preprocessing {self.preprocessing.value} is not implemented from CAPS directory."
)
else:
mod_subfolder = self.preprocessing.value.replace("-", "_")
file_type = self.get_filetype()
return mod_subfolder, file_type

def linear_nii(self) -> FileType:
needed_pipeline, modality = self.caps_nii()

if self.use_uncropped_image:
desc_crop = ""
else:
desc_crop = "_desc-Crop"

file_type = FileType(
pattern=f"*space-MNI152NLin2009cSym{desc_crop}_res-1x1x1_{modality.value}.nii.gz",
description=f"{modality.value} Image registered in MNI152NLin2009cSym space using {needed_pipeline.value} pipeline "
+ (
""
if self.use_uncropped_image
else "and cropped (matrix size 169×208×179, 1 mm isotropic voxels)"
),
needed_pipeline=needed_pipeline,
)
return file_type


class PETPreprocessingConfig(PreprocessingConfig):
tracer: Tracer = Tracer.FFDG
suvr_reference_region: SUVRReferenceRegions = SUVRReferenceRegions.CEREBELLUMPONS2
preprocessing: Preprocessing = Preprocessing.PET_LINEAR

def bids_nii(self, reconstruction: Optional[str] = None) -> FileType:
trc = "" if self.tracer is None else f"_trc-{Tracer(self.tracer).value}"
rec = "" if reconstruction is None else f"_rec-{reconstruction}"
description = "PET data"

if self.tracer:
description += f" with {self.tracer.value} tracer"
if reconstruction:
description += f" and reconstruction method {reconstruction}"

file_type = FileType(
pattern=f"pet/*{trc}{rec}_pet.nii*", description=description
)
return file_type

def get_filetype(self) -> FileType:
if self.use_uncropped_image:
description = ""
else:
description = "_desc-Crop"

file_type = FileType(
pattern=f"pet_linear/*_trc-{self.tracer.value}_space-MNI152NLin2009cSym{description}_res-1x1x1_suvr-{self.suvr_reference_region.value}_pet.nii.gz",
description="",
needed_pipeline="pet-linear",
)
return file_type


class CustomPreprocessingConfig(PreprocessingConfig):
custom_suffix: str = ""
preprocessing: Preprocessing = Preprocessing.CUSTOM

def bids_nii(self, reconstruction: Optional[str] = None) -> FileType:
return FileType(
pattern=f"*{self.custom_suffix}",
description="Custom suffix",
)

def get_filetype(self) -> FileType:
return self.bids_nii()


class DTIPreprocessingConfig(PreprocessingConfig):
dti_measure: DTIMeasure = DTIMeasure.FRACTIONAL_ANISOTROPY
dti_space: DTISpace = DTISpace.ALL
preprocessing: Preprocessing = Preprocessing.DWI_DTI

def bids_nii(self, reconstruction: Optional[str] = None) -> FileType:
return FileType(pattern="dwi/sub-*_ses-*_dwi.nii*", description="DWI NIfTI")

def get_filetype(self) -> FileType:
"""Return the query dict required to capture DWI DTI images.
Parameters
----------
config: DTIPreprocessingConfig
Returns
-------
FileType :
"""
measure = self.dti_measure
space = self.dti_space

return FileType(
pattern=f"dwi/dti_based_processing/*/*_space-{space}_{measure.value}.nii.gz",
description=f"DTI-based {measure.value} in space {space}.",
needed_pipeline="dwi_dti",
)


class T1PreprocessingConfig(PreprocessingConfig):
preprocessing: Preprocessing = Preprocessing.T1_LINEAR

def bids_nii(self, reconstruction: Optional[str] = None) -> FileType:
return FileType(pattern="anat/sub-*_ses-*_T1w.nii*", description="T1w MRI")

def caps_nii(self) -> tuple:
return (self.preprocessing, LinearModality.T1W)

def get_filetype(self) -> FileType:
return self.linear_nii()


class FlairPreprocessingConfig(PreprocessingConfig):
preprocessing: Preprocessing = Preprocessing.FLAIR_LINEAR

def bids_nii(self, reconstruction: Optional[str] = None) -> FileType:
return FileType(pattern="sub-*_ses-*_flair.nii*", description="FLAIR T2w MRI")

def caps_nii(self) -> tuple:
return (self.preprocessing, LinearModality.T2W)

def get_filetype(self) -> FileType:
return self.linear_nii()


class T2PreprocessingConfig(PreprocessingConfig):
preprocessing: Preprocessing = Preprocessing.T2_LINEAR

def bids_nii(self, reconstruction: Optional[str] = None) -> FileType:
raise NotImplementedError(
f"Extraction of preprocessing {self.preprocessing.value} is not implemented from BIDS directory."
)

def caps_nii(self) -> tuple:
return (self.preprocessing, LinearModality.FLAIR)

def get_filetype(self) -> FileType:
return self.linear_nii()
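A short sketch exercising the new per-modality configs, based only on the defaults and methods shown above; printed values depend on the enum definitions in clinicadl.utils.enum:

# Sketch only: exercises the configs added in this commit.
from pathlib import Path

from clinicadl.caps_dataset.preprocessing.config import (
    CustomPreprocessingConfig,
    DTIPreprocessingConfig,
    T1PreprocessingConfig,
)

# CAPS lookup for t1-linear outputs: subfolder plus cropped MNI pattern.
t1 = T1PreprocessingConfig()
folder, file_type = t1.compute_folder_and_file_type()
print(folder, file_type.pattern)

# Passing from_bids takes the bids_nii() branch instead.
folder, file_type = t1.compute_folder_and_file_type(from_bids=Path("/path/to/bids"))
print(file_type.pattern)  # anat/sub-*_ses-*_T1w.nii*

# DTI maps: the pattern is parameterised by dti_measure and dti_space.
dti = DTIPreprocessingConfig()
print(dti.get_filetype().pattern)

# Custom preprocessing matches a user-provided suffix (example value).
custom = CustomPreprocessingConfig(custom_suffix="segm-graymatter_probability.nii.gz")
print(custom.get_filetype().pattern)  # *segm-graymatter_probability.nii.gz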
3 changes: 1 addition & 2 deletions clinicadl/commandline/pipelines/generate/artifacts/cli.py
@@ -7,7 +7,6 @@
from joblib import Parallel, delayed

from clinicadl.caps_dataset.caps_dataset_config import CapsDatasetConfig
from clinicadl.caps_dataset.caps_dataset_utils import find_file_type
from clinicadl.commandline import arguments
from clinicadl.commandline.modules_options import (
data,
@@ -87,7 +86,7 @@ def cli(generated_caps_directory, **kwargs):
(generated_caps_directory / "subjects").mkdir(parents=True, exist_ok=True)

# Find appropriate preprocessing file type
file_type = find_file_type(caps_config)
file_type = caps_config.preprocessing.get_filetype()

def create_artifacts_image(data_idx: int) -> pd.DataFrame:
participant_id = data_df.at[data_idx, "participant_id"]
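The same one-line change is applied in the hypometabolic CLI below. A hedged sketch of the new call site; the config construction is elided because the CapsDatasetConfig factory signature is not part of this diff:

# caps_config.preprocessing is one of the config classes added in this commit;
# a FlairPreprocessingConfig stands in for it here.
from clinicadl.caps_dataset.preprocessing.config import FlairPreprocessingConfig

preprocessing = FlairPreprocessingConfig()   # stands in for caps_config.preprocessing
file_type = preprocessing.get_filetype()     # replaces find_file_type(caps_config)
print(file_type.pattern)                     # pattern built by linear_nii() for the FLAIR modality

Note that the removed find_file_type only handled T1 and PET configs; the per-class get_filetype() methods now also cover FLAIR, DTI, and custom suffixes.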
clinicadl/commandline/pipelines/generate/hypometabolic/cli.py
@@ -8,7 +8,6 @@
from nilearn.image import resample_to_img

from clinicadl.caps_dataset.caps_dataset_config import CapsDatasetConfig
from clinicadl.caps_dataset.caps_dataset_utils import find_file_type
from clinicadl.commandline import arguments
from clinicadl.commandline.modules_options import data, dataloader, preprocessing
from clinicadl.commandline.pipelines.generate.hypometabolic import (
@@ -84,8 +83,7 @@ def cli(generated_caps_directory, **kwargs):
(generated_caps_directory / "subjects").mkdir(parents=True, exist_ok=True)

# Find appropriate preprocessing file type
file_type = find_file_type(caps_config)

file_type = caps_config.preprocessing.get_filetype()
mask_path = get_mask_path(generate_config.pathology)

mask_nii = nib.loadsave.load(mask_path)