diff --git a/NEWS.md b/NEWS.md
index c0afac8f..27074d61 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -1,3 +1,16 @@
+# Version 2.2.3
+
+## Minor changes
+
+- Tables with feature values now contain extra columns. These columns specify the file name (for non-DICOM input),
+  the directory paths of the image and mask files, and several DICOM tags for identifying the input.
+
+- MIRP now checks for potential mismatches between the frames of reference of image and mask files.
+
+## Fixes
+
+- Fixed an error that occurred when attempting to create a deep copy of `ImageITKFile` objects.
+
 # Version 2.2.2
 
 ## Minor changes
diff --git a/README.md b/README.md
index 122e281b..d0d90904 100644
--- a/README.md
+++ b/README.md
@@ -44,8 +44,8 @@ pip install mirp
 MIRP can be used to compute quantitative features from regions of interest in images in an IBSI-compliant manner
 using a standardized workflow. This requires both images and masks. MIRP can process DICOM, NIfTI, NRRD and numpy
-images. Masks are DICOM radiotherapy structure sets (RTSTRUCT), or volumetric data with integer labels (e.g. 1, 2,
-etc.).
+images. Masks are DICOM radiotherapy structure sets (RTSTRUCT), DICOM segmentations (SEG), or volumetric data with
+integer labels (e.g. 1, 2, etc.).
 
 Below is a minimal working example for extracting features from a single image file and its mask.
diff --git a/docs/.buildinfo b/docs/.buildinfo
index afebd61f..92258720 100644
--- a/docs/.buildinfo
+++ b/docs/.buildinfo
@@ -1,4 +1,4 @@
 # Sphinx build info version 1
 # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
-config: 4e548bc695306a4ce61e35d5a23229c6
+config: b7d3c05f6bec86800f0a1510ac564b73
 tags: 645f666f9bcd5a90fca523b33c5a78b7
diff --git a/docs/_images/tutorial_compute_radiomics_features_mr_8_0.png b/docs/_images/tutorial_compute_radiomics_features_mr_8_0.png
deleted file mode 100644
index ac067068..00000000
Binary files a/docs/_images/tutorial_compute_radiomics_features_mr_8_0.png and /dev/null differ
diff --git a/docs/_modules/index.html b/docs/_modules/index.html
index 538c9b03..173b7822 100644
--- a/docs/_modules/index.html
+++ b/docs/_modules/index.html
@@ -3,7 +3,7 @@
- Overview: module code — mirp 2.2.2 documentation
+ Overview: module code — mirp 2.2.3 documentation
@@ -14,7 +14,7 @@
diff --git a/docs/_modules/mirp/data_import/import_image_and_mask.html b/docs/_modules/mirp/data_import/import_image_and_mask.html
index 60659b9e..10d2d277 100644
--- a/docs/_modules/mirp/data_import/import_image_and_mask.html
+++ b/docs/_modules/mirp/data_import/import_image_and_mask.html
@@ -3,7 +3,7 @@
- mirp.data_import.import_image_and_mask — mirp 2.2.1 documentation
+ mirp.data_import.import_image_and_mask — mirp 2.2.3 documentation
@@ -14,7 +14,7 @@
@@ -51,7 +51,6 @@
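The README hunk above points to a minimal working example for extracting features from a single image file and its mask. A sketch of such a call is given below, assuming the top-level `extract_features` export of recent MIRP versions; the paths and discretisation keyword arguments are illustrative placeholders, not values taken from this changeset.

```python
# Minimal sketch of IBSI-compliant feature extraction with MIRP.
# Paths and keyword arguments are placeholders; adjust them to your own data.
from mirp import extract_features

feature_tables = extract_features(
    image="path/to/image.nii.gz",   # DICOM, NIfTI, NRRD or numpy image
    mask="path/to/mask.nii.gz",     # RTSTRUCT, SEG or labelled volume
    base_discretisation_method="fixed_bin_number",
    base_discretisation_n_bins=32,
)

# A list with one feature table (pandas DataFrame) per image-mask pair is
# returned; as of 2.2.3 these tables carry extra columns identifying the input.
print(feature_tables[0].head())
```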

diff --git a/docs/_modules/mirp/deepLearningPreprocessing.html b/docs/_modules/mirp/deepLearningPreprocessing.html
deleted file mode 100644
index 55654591..00000000
--- a/docs/_modules/mirp/deepLearningPreprocessing.html
+++ /dev/null
@@ -1,509 +0,0 @@
(Deleted stale Sphinx source page "mirp.deepLearningPreprocessing — mirp 2.0.1 documentation". It rendered the old camelCase module, including deep_learning_preprocessing(), deep_learning_preprocessing_generator() and the internal _base_deep_learning_preprocessing() and _generate_dl_preprocessing_workflows() helpers.)
diff --git a/docs/_modules/mirp/deep_learning_preprocessing.html b/docs/_modules/mirp/deep_learning_preprocessing.html
index e1d95a4a..b0174e09 100644
--- a/docs/_modules/mirp/deep_learning_preprocessing.html
+++ b/docs/_modules/mirp/deep_learning_preprocessing.html
@@ -3,7 +3,7 @@
- mirp.deep_learning_preprocessing — mirp 2.2.1 documentation
+ mirp.deep_learning_preprocessing — mirp 2.2.3 documentation
@@ -14,7 +14,7 @@
@@ -51,7 +51,6 @@
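The deleted page above documented the deprecated camelCase deep-learning preprocessing module; the same functionality is exposed as `mirp.deep_learning_preprocessing`, whose rebuilt page precedes this note. A hedged sketch of a typical call, using placeholder paths and a crop size in MIRP's (z, y, x) convention:

```python
# Sketch: crop image-mask pairs to a fixed (z, y, x) size for deep learning.
# Paths are placeholders; processed volumes are written as numpy files.
from mirp import deep_learning_preprocessing

deep_learning_preprocessing(
    image="path/to/image_directory",
    mask="path/to/mask_directory",
    crop_size=[64, 128, 128],        # numpy convention: (z, y, x)
    output_slices=False,             # keep 3D volumes instead of 2D slices
    write_file_format="numpy",
    write_dir="path/to/output_directory",
)
```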

diff --git a/docs/_modules/mirp/extractFeaturesAndImages.html b/docs/_modules/mirp/extractFeaturesAndImages.html
deleted file mode 100644
index 423f1f47..00000000
--- a/docs/_modules/mirp/extractFeaturesAndImages.html
+++ /dev/null
@@ -1,630 +0,0 @@
(Deleted stale Sphinx source page "mirp.extractFeaturesAndImages — mirp 2.0.1 documentation". It rendered the old camelCase module, including extract_features(), extract_features_generator(), extract_images(), extract_images_generator(), extract_features_and_images(), extract_features_and_images_generator() and the internal workflow-generation helpers.)
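The page removed above corresponds to the current `mirp.extract_features_and_images` module, whose rebuilt documentation appears later in this diff. A sketch of combined feature computation and image export, with placeholder paths and an assumed bin-width setting:

```python
# Sketch: compute features and return processed images and masks in one pass.
from mirp import extract_features_and_images

results = extract_features_and_images(
    image="path/to/image_directory",
    mask="path/to/mask_directory",
    image_export_format="dict",           # numpy arrays plus characteristics
    base_discretisation_method="fixed_bin_size",
    base_discretisation_bin_width=25.0,   # assumed bin width, e.g. for CT in HU
    num_cpus=4,                           # optional parallel processing via ray
)
```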
diff --git a/docs/_modules/mirp/extractImageParameters.html b/docs/_modules/mirp/extractImageParameters.html
deleted file mode 100644
index 69330e42..00000000
--- a/docs/_modules/mirp/extractImageParameters.html
+++ /dev/null
@@ -1,218 +0,0 @@
(Deleted stale Sphinx source page "mirp.extractImageParameters — mirp 2.1.0 documentation". It rendered the old camelCase module containing extract_image_parameters().)
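The deleted pages directly above and below documented `extract_image_parameters` and `extract_mask_labels`; both remain available from the top-level package. A sketch of using them to inspect the input before running extraction (paths are placeholders):

```python
# Sketch: inspect acquisition metadata and mask labels before extraction.
from mirp import extract_image_parameters, extract_mask_labels

# Table with acquisition and reconstruction metadata, one row per image.
image_parameters = extract_image_parameters(image="path/to/image_directory")

# Table with the region-of-interest labels found in each mask file.
mask_labels = extract_mask_labels(mask="path/to/mask_directory")

print(image_parameters.head())
print(mask_labels.head())
```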
diff --git a/docs/_modules/mirp/extractMaskLabels.html b/docs/_modules/mirp/extractMaskLabels.html
deleted file mode 100644
index 59794496..00000000
--- a/docs/_modules/mirp/extractMaskLabels.html
+++ /dev/null
@@ -1,220 +0,0 @@
(Deleted stale Sphinx source page "mirp.extractMaskLabels — mirp 2.0.1 documentation". It rendered the old camelCase module containing extract_mask_labels().)
diff --git a/docs/_modules/mirp/extract_features_and_images.html b/docs/_modules/mirp/extract_features_and_images.html
index f26f2dfe..77a204af 100644
--- a/docs/_modules/mirp/extract_features_and_images.html
+++ b/docs/_modules/mirp/extract_features_and_images.html
@@ -3,7 +3,7 @@
- mirp.extract_features_and_images — mirp 2.2.1 documentation
+ mirp.extract_features_and_images — mirp 2.2.3 documentation
@@ -14,7 +14,7 @@
@@ -51,7 +51,6 @@
@@ -692,6 +694,10 @@ Source code for mirp.extract_features_and_images
 ) -> Generator[StandardWorkflow, None, None]:
 
     for image_file in image_list:
+
+        # Check for potential problems with the masks associated with this image.
+        image_file.check_associated_masks()
+
         for current_settings in settings:
 
             if not current_settings.feature_extr.has_any_feature_family() and (
diff --git a/docs/_modules/mirp/extract_image_parameters.html b/docs/_modules/mirp/extract_image_parameters.html
index 2efc2168..0f183cae 100644
--- a/docs/_modules/mirp/extract_image_parameters.html
+++ b/docs/_modules/mirp/extract_image_parameters.html
@@ -3,7 +3,7 @@
- mirp.extract_image_parameters — mirp 2.2.1 documentation
+ mirp.extract_image_parameters — mirp 2.2.3 documentation
@@ -14,7 +14,7 @@
@@ -51,7 +51,6 @@
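The hunk above adds an internal `image_file.check_associated_masks()` call, matching the changelog entry on frame-of-reference checks. MIRP's own implementation is not part of this diff; purely as an illustration of the underlying idea, and not as MIRP code, a standalone comparison of DICOM frame-of-reference UIDs with `pydicom` could look like this:

```python
# Illustrative only: compare the frame-of-reference UID of an image slice and a
# mask file with pydicom. This is not MIRP's internal implementation.
import pydicom


def frame_of_reference_uid(path: str) -> str | None:
    ds = pydicom.dcmread(path, stop_before_pixels=True)
    uid = getattr(ds, "FrameOfReferenceUID", None)
    if uid is None and "ReferencedFrameOfReferenceSequence" in ds:
        # RTSTRUCT files keep the UID in a referenced sequence instead.
        uid = ds.ReferencedFrameOfReferenceSequence[0].FrameOfReferenceUID
    return uid


image_uid = frame_of_reference_uid("path/to/image_slice.dcm")
mask_uid = frame_of_reference_uid("path/to/rtstruct.dcm")
if image_uid and mask_uid and image_uid != mask_uid:
    print("Warning: image and mask use different frames of reference.")
```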

diff --git a/docs/_modules/mirp/extract_mask_labels.html b/docs/_modules/mirp/extract_mask_labels.html
index 95b96184..222d5ff9 100644
--- a/docs/_modules/mirp/extract_mask_labels.html
+++ b/docs/_modules/mirp/extract_mask_labels.html
@@ -3,7 +3,7 @@
- mirp.extract_mask_labels — mirp 2.2.1 documentation
+ mirp.extract_mask_labels — mirp 2.2.3 documentation
@@ -14,7 +14,7 @@
@@ -51,7 +51,6 @@
diff --git a/docs/_modules/mirp/importData/importImageAndMask.html b/docs/_modules/mirp/importData/importImageAndMask.html deleted file mode 100644 index 3af39d96..00000000 --- a/docs/_modules/mirp/importData/importImageAndMask.html +++ /dev/null @@ -1,412 +0,0 @@ - - - - - - mirp.importData.importImageAndMask — mirp 2.1.0 documentation - - - - - - - - - - - - - - - -
- - -
- -
-
-
-
    -
  • - - -
  • -
  • -
-
-
-
-
- -

Source code for mirp.importData.importImageAndMask

-from mirp.importData.importImage import import_image
-from mirp.importData.importMask import import_mask
-from mirp.importData.imageGenericFile import ImageFile, MaskFile
-from mirp.importData.imageDicomFile import ImageDicomFile, MaskDicomFile
-from mirp.importData.imageDicomFileStack import ImageDicomFileStack
-from mirp.utilities.utilities import random_string
-
-
-
-[docs] -def import_image_and_mask( - image, - mask=None, - sample_name: None | str | list[str] = None, - image_name: None | str | list[str] = None, - image_file_type: None | str = None, - image_modality: None | str | list[str] = None, - image_sub_folder: None | str = None, - mask_name: None | str | list[str] = None, - mask_file_type: None | str = None, - mask_modality: None | str | list[str] = None, - mask_sub_folder: None | str = None, - roi_name: None | str | list[str] | dict[str | str] = None, - association_strategy: None | str | list[str] = None, - stack_images: str = "auto", - stack_masks: str = "auto" -) -> list[ImageFile]: - """ - Creates and curates references to image and mask files. This function is usually called internally by other - functions such as :func:`~mirp.extractFeaturesAndImages.extract_features`. - - Parameters - ---------- - image: Any - A path to an image file, a path to a directory containing image files, a path to a config_data.xml - file, a path to a csv file containing references to image files, a pandas.DataFrame containing references to - image files, or a numpy.ndarray. - - mask: Any - A path to a mask file, a path to a directory containing mask files, a path to a config_data.xml - file, a path to a csv file containing references to mask files, a pandas.DataFrame containing references to - mask files, or a numpy.ndarray. - - sample_name: str or list of str, default: None - Name of expected sample names. This is used to select specific image files. If None, no image files are - filtered based on the corresponding sample name (if known). - - image_name: str, optional, default: None - Pattern to match image files against. The matches are exact. Use wildcard symbols ("*") to - match varying structures. The sample name (if part of the file name) can also be specified using "#". For - example, image_name = '#_*_image' would find John_Doe in John_Doe_CT_image.nii or John_Doe_001_image.nii. - File extensions do not need to be specified. If None, file names are not used for filtering files and - setting sample names. - - image_file_type: {"dicom", "nifti", "nrrd", "numpy", "itk"}, optional, default: None - The type of file that is expected. If None, the file type is not used for filtering files. - "itk" comprises "nifti" and "nrrd" file types. - - image_modality: {"ct", "pet", "pt", "mri", "mr", "rtdose", "generic"}, optional, default: None - The type of modality that is expected. If None, modality is not used for filtering files. Note that only - DICOM files contain metadata concerning modality. - - image_sub_folder: str, optional, default: None - Fixed directory substructure where image files are located. If None, the directory substructure is not used - for filtering files. - - mask_name: str or list of str, optional, default: None - Pattern to match mask files against. The matches are exact. Use wildcard symbols ("*") to match varying - structures. The sample name (if part of the file name) can also be specified using "#". For example, - mask_name = '#_*_mask' would find John_Doe in John_Doe_CT_mask.nii or John_Doe_001_mask.nii. File extensions - do not need to be specified. If None, file names are not used for filtering files and setting sample names. - - mask_file_type: {"dicom", "nifti", "nrrd", "numpy", "itk"}, optional, default: None - The type of file that is expected. If None, the file type is not used for filtering files. - "itk" comprises "nifti" and "nrrd" file types. 
- - mask_modality: {"rtstruct", "seg", "generic_mask"}, optional, default: None - The type of modality that is expected. If None, modality is not used for filtering files. - Note that only DICOM files contain metadata concerning modality. Masks from non-DICOM files are considered to - be "generic_mask". - - mask_sub_folder: str, optional, default: None - Fixed directory substructure where mask files are located. If None, the directory substructure is not used for - filtering files. - - roi_name: str, optional, default: None - Name of the regions of interest that should be assessed. - - association_strategy: {"frame_of_reference", "sample_name", "file_distance", "file_name_similarity", "list_order", "position", "single_image"} - The preferred strategy for associating images and masks. File association is preferably done using frame of - reference UIDs (DICOM), or sample name (NIfTI, numpy). Other options are relatively frail, except for - `list_order` which may be applicable when a list with images and a list with masks is provided and both lists - are of equal length. - - stack_images: {"auto", "yes", "no"}, optional, default: "str" - If image files in the same directory cannot be assigned to different samples, and are 2D (slices) of the same - size, they might belong to the same 3D image stack. "auto" will stack 2D numpy arrays, but not other file types. - "yes" will stack all files that contain 2D images, that have the same dimensions, orientation and spacing, - except for DICOM files. "no" will not stack any files. DICOM files ignore this argument, because their stacking - can be determined from metadata. - - stack_masks: {"auto", "yes", "no"}, optional, default: "str" - If mask files in the same directory cannot be assigned to different samples, and are 2D (slices) of the same - size, they might belong to the same 3D mask stack. "auto" will stack 2D numpy arrays, but not other file - types. "yes" will stack all files that contain 2D images, that have the same dimensions, orientation and - spacing, except for DICOM files. "no" will not stack any files. DICOM files ignore this argument, - because their stacking can be determined from metadata. - - Returns - ------- - list[ImageFile] - The functions returns a list of ImageFile objects, if any were found with the specified filters. - """ - if mask is None: - mask = image - - # Generate list of images. - image_list = import_image( - image, - sample_name=sample_name, - image_name=image_name, - image_file_type=image_file_type, - image_modality=image_modality, - image_sub_folder=image_sub_folder, - stack_images=stack_images - ) - - # Generate list of images. - mask_list = import_mask( - mask, - sample_name=sample_name, - mask_name=mask_name, - mask_file_type=mask_file_type, - mask_modality=mask_modality, - mask_sub_folder=mask_sub_folder, - stack_masks=stack_masks, - roi_name=roi_name - ) - - if len(image_list) == 0: - raise ValueError(f"No images were present.") - - # Determine association strategy, if this is unset. - possible_association_strategy = set_association_strategy( - image_list=image_list, - mask_list=mask_list - ) - - if association_strategy is None: - association_strategy = possible_association_strategy - elif isinstance(association_strategy, str): - association_strategy = [association_strategy] - - if not isinstance(association_strategy, set): - association_strategy = set(association_strategy) - - # Test association strategy. 
- unavailable_strategy = association_strategy - possible_association_strategy - if len(unavailable_strategy) > 0: - raise ValueError( - f"One or more strategies for associating images and masks are not available for the provided image and " - f"mask set: {', '.join(list(unavailable_strategy))}. Only the following strategies are available: " - f"{'. '.join(list(possible_association_strategy))}" - ) - - if len(possible_association_strategy) == 0: - raise ValueError( - f"No strategies for associating images and masks are available, indicating that there is no clear way to " - f"establish an association." - ) - - # Start association. - if association_strategy == {"list_order"}: - # If only the list_order strategy is available, use this. - for ii, image in enumerate(image_list): - image.associated_masks = [mask_list[ii]] - - elif association_strategy == {"single_image"}: - # If single_image is the only strategy, use this. - image_list[0].associated_masks = mask_list - - else: - for ii, image in enumerate(image_list): - image.associate_with_mask( - mask_list=mask_list, - association_strategy=association_strategy - ) - - if all(image.associated_masks is None for image in image_list): - if "single_image" in association_strategy: - image_list[0].associated_masks = mask_list - elif "list_order" in association_strategy: - for ii, image in enumerate(image_list): - image.associated_masks = [mask_list[ii]] - - # Ensure that we are working with deep copies from this point - we don't want to propagate changes to masks, - # images by reference. - image_list = [image.copy() for image in image_list] - - # Set sample names. First we check if all sample names are missing. - if all(image.sample_name is None for image in image_list): - if isinstance(sample_name, str): - sample_name = [sample_name] - - if isinstance(sample_name, list) and len(sample_name) == len(image_list): - for ii, image in enumerate(image_list): - image.set_sample_name(sample_name=sample_name[ii]) - if image.associated_masks is not None: - for mask in image.associated_masks: - mask.set_sample_name(sample_name=sample_name[ii]) - - elif all(image.file_name is not None for image in image_list): - for image in image_list: - image.set_sample_name(sample_name=image.file_name) - - if image.associated_masks is not None: - for mask in image.associated_masks: - mask.set_sample_name(sample_name=image.file_name) - - # Then set any sample names for images that still miss them. - if any(image.sample_name is None for image in image_list): - for ii, image in enumerate(image_list): - if image.sample_name is None: - generated_sample_name = str(ii + 1) + "_" + random_string(16) - image.set_sample_name(sample_name=generated_sample_name) - if image.associated_masks is not None: - for mask in image.associated_masks: - mask.set_sample_name(sample_name=generated_sample_name) - - return image_list
- - - -def set_association_strategy( - image_list: list[ImageFile] | list[ImageDicomFile], - mask_list: list[MaskFile] | list[MaskDicomFile] -) -> set[str]: - # Association strategy is set by a process of elimination. - possible_strategies = { - "frame_of_reference", "sample_name", "file_distance", "file_name_similarity", "list_order", "position", - "single_image" - } - - # Check that images and masks are available - if len(mask_list) == 0 or len(image_list) == 0: - return set([]) - - # Check if association by list order is possible. - if len(image_list) != len(mask_list): - possible_strategies.remove("list_order") - - # Check that association with a single image is possible. - if len(image_list) > 1: - possible_strategies.remove("single_image") - - # Check if association by frame of reference UID is possible. - if (any(isinstance(image, ImageDicomFile) or isinstance(image, ImageDicomFileStack) for image in image_list) and - any(isinstance(mask, MaskDicomFile) for mask in mask_list)): - dcm_image_list: list[ImageDicomFile | ImageDicomFileStack] = [ - image for image in image_list - if isinstance(image, ImageDicomFile) or isinstance(image, ImageDicomFileStack)] - dcm_mask_list: list[MaskDicomFile] = [mask for mask in mask_list if isinstance(mask, MaskDicomFile)] - - # If frame of reference UIDs are completely absent. - if all(image.frame_of_reference_uid is None for image in dcm_image_list) or \ - all(mask.frame_of_reference_uid is None for mask in dcm_mask_list): - possible_strategies.remove("frame_of_reference") - - else: - possible_strategies.remove("frame_of_reference") - - # Check if association by sample name is possible. - if all(image.sample_name is None for image in image_list) or all(mask.sample_name is None for mask in mask_list): - possible_strategies.remove("sample_name") - - # Check if file_distance is possible. If directory are absent or singular, file distance cannot be used for - # association. - image_dir_path = set(image.dir_path for image in image_list) - {None} - mask_dir_path = set(mask.dir_path for mask in mask_list) - {None} - if len(image_dir_path) == 0 or len(mask_dir_path) <= 1: - possible_strategies.remove("file_distance") - - # Check if file_name_similarity is possible. If file names are absent, this is not possible. - if all(image.file_name is None for image in image_list) or all(mask.file_name is None for mask in mask_list): - possible_strategies.remove("file_name_similarity") - - # Check if position can be used. - if all(image.image_origin is None for image in image_list) or all(mask.image_origin is None for mask in mask_list): - possible_strategies.remove("position") - else: - image_position_data = set([ - image.get_image_origin(as_str=True) + image.get_image_spacing(as_str=True) + - image.get_image_dimension(as_str=True) + image.get_image_orientation(as_str=True) - for image in image_list if image.image_origin is not None - ]) - mask_position_data = set([ - mask.get_image_origin(as_str=True) + mask.get_image_spacing(as_str=True) + - mask.get_image_dimension(as_str=True) + mask.get_image_orientation(as_str=True) - for mask in mask_list if mask.image_origin is not None - ]) - - # Check that there are more - if len(image_position_data) <= 1 or len(mask_position_data) <= 1: - possible_strategies.remove("position") - - return possible_strategies -
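The import workflow above returns `ImageFile` objects with their associated masks and sample names filled in. Below is a minimal, hedged usage sketch; the file paths, ROI name and parameter values are placeholders (not taken from the source), and the import path follows the module documented on this page.

```python
# Minimal usage sketch for import_image_and_mask. Paths, the ROI name and the
# association strategy are illustrative placeholders.
from mirp.data_import.import_image_and_mask import import_image_and_mask

image_list = import_image_and_mask(
    image="/data/scans",          # directory with DICOM, NIfTI, NRRD or numpy images
    mask="/data/masks",           # directory with RTSTRUCT, SEG or labelled masks
    mask_modality="rtstruct",     # only DICOM files carry modality metadata
    roi_name="GTV",               # hypothetical region of interest name
    association_strategy="frame_of_reference",
    stack_images="auto",
    stack_masks="auto",
)

# Each returned ImageFile carries the masks associated with it and a sample name
# (set explicitly, derived from the file name, or randomly generated).
for image in image_list:
    print(image.sample_name, image.associated_masks)
```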
- - - - \ No newline at end of file diff --git a/docs/_modules/mirp/settings/feature_parameters.html b/docs/_modules/mirp/settings/feature_parameters.html index bbe94d7d..76502624 100644 --- a/docs/_modules/mirp/settings/feature_parameters.html +++ b/docs/_modules/mirp/settings/feature_parameters.html @@ -3,7 +3,7 @@ - mirp.settings.feature_parameters — mirp 2.2.1 documentation + mirp.settings.feature_parameters — mirp 2.2.3 documentation @@ -14,7 +14,7 @@ - + @@ -51,7 +51,6 @@

diff --git a/docs/_modules/mirp/settings/general_parameters.html b/docs/_modules/mirp/settings/general_parameters.html index ce16bb91..49a52692 100644 --- a/docs/_modules/mirp/settings/general_parameters.html +++ b/docs/_modules/mirp/settings/general_parameters.html @@ -3,7 +3,7 @@ - mirp.settings.general_parameters — mirp 2.2.1 documentation + mirp.settings.general_parameters — mirp 2.2.3 documentation @@ -14,7 +14,7 @@ - + @@ -44,11 +44,13 @@ diff --git a/docs/_modules/mirp/settings/generic.html b/docs/_modules/mirp/settings/generic.html index fde619ca..c2b80f9a 100644 --- a/docs/_modules/mirp/settings/generic.html +++ b/docs/_modules/mirp/settings/generic.html @@ -3,7 +3,7 @@ - mirp.settings.generic — mirp 2.2.1 documentation + mirp.settings.generic — mirp 2.2.3 documentation @@ -14,7 +14,7 @@ - + @@ -51,7 +51,6 @@

diff --git a/docs/_modules/mirp/settings/image_processing_parameters.html b/docs/_modules/mirp/settings/image_processing_parameters.html index 45163263..33d0648b 100644 --- a/docs/_modules/mirp/settings/image_processing_parameters.html +++ b/docs/_modules/mirp/settings/image_processing_parameters.html @@ -3,7 +3,7 @@ - mirp.settings.image_processing_parameters — mirp 2.2.1 documentation + mirp.settings.image_processing_parameters — mirp 2.2.3 documentation @@ -14,7 +14,7 @@ - + @@ -51,7 +51,6 @@

diff --git a/docs/_modules/mirp/settings/interpolation_parameters.html b/docs/_modules/mirp/settings/interpolation_parameters.html index 35600fba..9d6c0d36 100644 --- a/docs/_modules/mirp/settings/interpolation_parameters.html +++ b/docs/_modules/mirp/settings/interpolation_parameters.html @@ -3,7 +3,7 @@ - mirp.settings.interpolation_parameters — mirp 2.2.1 documentation + mirp.settings.interpolation_parameters — mirp 2.2.3 documentation @@ -14,7 +14,7 @@ - + @@ -51,7 +51,6 @@

diff --git a/docs/_modules/mirp/settings/perturbation_parameters.html b/docs/_modules/mirp/settings/perturbation_parameters.html index 37f03f7f..2b16db55 100644 --- a/docs/_modules/mirp/settings/perturbation_parameters.html +++ b/docs/_modules/mirp/settings/perturbation_parameters.html @@ -3,7 +3,7 @@ - mirp.settings.perturbation_parameters — mirp 2.2.1 documentation + mirp.settings.perturbation_parameters — mirp 2.2.3 documentation @@ -14,7 +14,7 @@ - + @@ -44,11 +44,13 @@ diff --git a/docs/_modules/mirp/settings/resegmentation_parameters.html b/docs/_modules/mirp/settings/resegmentation_parameters.html index 9c58cce7..f7005fbd 100644 --- a/docs/_modules/mirp/settings/resegmentation_parameters.html +++ b/docs/_modules/mirp/settings/resegmentation_parameters.html @@ -3,7 +3,7 @@ - mirp.settings.resegmentation_parameters — mirp 2.2.1 documentation + mirp.settings.resegmentation_parameters — mirp 2.2.3 documentation @@ -14,7 +14,7 @@ - + @@ -51,7 +51,6 @@

diff --git a/docs/_modules/mirp/settings/settingsFeatureExtraction.html b/docs/_modules/mirp/settings/settingsFeatureExtraction.html deleted file mode 100644 index 3bac3f26..00000000 --- a/docs/_modules/mirp/settings/settingsFeatureExtraction.html +++ /dev/null @@ -1,756 +0,0 @@ - - - - - - mirp.settings.settingsFeatureExtraction — mirp 2.1.0 documentation - - - - - - - - - - - - - - - -

Source code for mirp.settings.settingsFeatureExtraction

-from typing import Union, List, Any
-from dataclasses import dataclass
-from mirp.settings.utilities import setting_def
-
-
-
-[docs] -@dataclass -class FeatureExtractionSettingsClass: - """ - Parameters related to feature computation. Many are conditional on the type of features that will be computed ( - ``base_feature_families``). - - Parameters - ---------- - by_slice: str or bool, optional, default: False - Defines whether calculations should be performed in 2D (True) or 3D (False), or alternatively only in the - largest slice ("largest"). See :class:`~mirp.settings.settingsGeneral.GeneralSettingsClass`. - - no_approximation: bool, optional, default: False - Disables approximation of features, such as Geary's c-measure. Can be True or False (default). See - :class:`~mirp.settings.settingsGeneral.GeneralSettingsClass`. - - ibsi_compliant: bool, optional, default: True - Limits computation of features to those features that have a reference value in the IBSI reference standard. - - base_feature_families: str or list of str, optional, default: "none" - Determines the feature families for which features are computed. Radiomics features are implemented as - defined in the IBSI reference manual. The following feature families are currently present, and can be added - using the following tags: - - * Morphological features: "mrp", "morph", "morphology", and "morphological". - * Local intensity features: "li", "loc.int", "loc_int", "local_int", and "local_intensity". - * Intensity-based statistical features: "st", "stat", "stats", "statistics", and "statistical". - * Intensity histogram features: "ih", "int_hist", "int_histogram", and "intensity_histogram". - * Intensity-volume histogram features: "ivh", "int_vol_hist", and "intensity_volume_histogram". - * Grey level co-occurrence matrix (GLCM) features: "cm", "glcm", "grey_level_cooccurrence_matrix", - and "cooccurrence_matrix". - * Grey level run length matrix (GLRLM) features: "rlm", "glrlm", "grey_level_run_length_matrix", and - "run_length_matrix". - * Grey level size zone matrix (GLSZM) features: "szm", "glszm", "grey_level_size_zone_matrix", and - "size_zone_matrix". - * Grey level distance zone matrix (GLDZM) features: "dzm", "gldzm", "grey_level_distance_zone_matrix", and - "distance_zone_matrix". - * Neighbourhood grey tone difference matrix (NGTDM) features: "tdm", "ngtdm", - "neighbourhood_grey_tone_difference_matrix", and "grey_tone_difference_matrix". - * Neighbouring grey level dependence matrix (NGLDM) features: "ldm", "ngldm", - "neighbouring_grey_level_dependence_matrix", and "grey_level_dependence_matrix". - - In addition, the following tags can be used: - - * "none": no features are computed. - * "all": all features are computed. - - A list of strings may be provided to select multiple feature families. - - .. note:: - Even though ``"none"`` is the internal default, the :func:`~mirp.extractFeaturesAndImages.extract_features` - function overrides this, and sets the default to ``"all"``. - - base_discretisation_method: {"fixed_bin_number", "fixed_bin_size", "fixed_bin_size_pyradiomics", "none"} - Method used for discretising intensities. Used to compute intensity histogram as well as texture features. - The setting is ignored if none of these feature families are being computed. The following options are - available: - - * "fixed_bin_number": The intensity range within the mask is divided into a fixed number of bins, - defined by the ``base_discretisation_bin_width`` parameter. - * "fixed_bin_size": The intensity range is divided into bins with a fixed width, defined using the - ``base_discretisation_bin_width`` parameter. 
The lower bound of the range is determined from the lower - bound of the mask resegmentation range, see the ``resegmentation_intensity_range`` in - :class:`~mirp.settings.settingsMaskResegmentation.ResegmentationSettingsClass`. CT images have a default - lower bound of the initial bin at -1000.0 and PET images have a default lower bound at 0.0. Other images, - including MRI, normalised CT and PET images and filtered images, do not have a default value. - * "fixed_bin_size_pyradiomics": The intensity range is divided into bins with a fixed width. This follows the - non-IBSI compliant implementation in the pyradiomics package. - * "none": The intensity range is not discretised into bins. This method can only be used if the image - intensities are integer and strictly positive. - - There is no default method. Multiple methods can be specified as a list to yield features according to each - method. - - .. warning:: - The "fixed_bin_size_pyradiomics" is not IBSI compliant, and should only be used when - reproducing results from studies that used pyradiomics. - - base_discretisation_n_bins: int or list of int - Number of bins used for the "fixed_bin_number" discretisation method. No default value. Multiple values can - be specified in a list to yield features according to each number of bins. - - base_discretisation_bin_width: float or list of float - Width of each bin in the "fixed_bin_size" discretisation method. No default value. Multiple values can be - specified in a list to yield features according to each bin width. - - ivh_discretisation_method: {"fixed_bin_number", "fixed_bin_size", "none"}, optional, default: "none" - Method used for discretising intensities for computing intensity-volume histograms. The discretisation - methods follow those in ``base_discretisation_method``. The "none" method changes to "fixed_bin_number" if - the underlying data are not suitable. - - ivh_discretisation_n_bins: int, optional, default: 1000 - Number of bins used for the "fixed_bin_number" discretisation method. - - ivh_discretisation_bin_width: float, optional - Width of each bin in the "fixed_bin_size" discretisation method. No default value. - - glcm_distance: float or list of float, optional, default: 1.0 - Distance (in voxels) for GLCM for determining the neighbourhood. Chebyshev, or checkerboard, distance is - used. A value of 1.0 will therefore consider all (diagonally) adjacent voxels as its neighbourhood. A list of - values can be provided to compute GLCM features at different scales. - - glcm_spatial_method: {"2d_average", "2d_slice_merge", "2.5d_direction_merge", "2.5d_volume_merge", "3d_average", "3d_volume_merge"}, optional - Determines how co-occurrence matrices are formed and aggregated. One of the following: - - * "2d_average": features are computed from all matrices then averaged [IBSI:BTW3]. - * "2d_slice_merge": matrices in the same slice are merged, features computed and then averaged [IBSI:SUJT]. - * "2.5d_direction_merge": matrices for the same direction are merged, features computed and then averaged - [IBSI:JJUI]. - * "2.5d_volume_merge": all matrices are merged and a single feature is calculated [IBSI:ZW7Z]. - * "3d_average": features are computed from all matrices then averaged [IBSI:ITBB]. - * "3d_volume_merge": all matrices are merged and a single feature is computed from the merged matrix - [IBSI:IAZD]. - - A list of values may be provided to extract features for multiple spatial methods. 
Default: "2d_slice_merge" - (``by_slice = False``) or "3d_volume_merge" (``by_slice = True``). - - glrlm_spatial_method: {"2d_average", "2d_slice_merge", "2.5d_direction_merge", "2.5d_volume_merge", "3d_average", "3d_volume_merge"}, optional - Determines how run length matrices are formed and aggregated. One of the following: - - * "2d_average": features are calculated from all matrices then averaged [IBSI:BTW3]. - * "2d_slice_merge": matrices in the same slice are merged, features computed and then averaged [IBSI:SUJT]. - * "2.5d_direction_merge": matrices for the same direction are merged, features computed and then averaged - [IBSI:JJUI]. - * "2.5d_volume_merge": all matrices are merged and a single feature is computed [IBSI:ZW7Z]. - * "3d_average": features are computed from all matrices then averaged [IBSI:ITBB]. - * "3d_volume_merge": all matrices are merged and a single feature is computed from the merged matrix - [IBSI:IAZD]. - - A list of values may be provided to extract features for multiple spatial methods. Default: - "2d_slice_merge" (``by_slice = False``) or "3d_volume_merge" (``by_slice = True``). - - glszm_spatial_method: {"2d", "2.5d", "3d"}, optional - Determines how the size zone matrices are formed and aggregated. One of the following: - - * "2d": features are computed from individual matrices and subsequently averaged [IBSI:8QNN]. - * "2.5d": all 2D matrices are merged and features are computed from this single matrix [IBSI:62GR]. - * "3d": features are computed from a single 3D matrix [IBSI:KOBO]. - - A list of values may be provided to extract features for multiple spatial methods. Default: "2d" - (``by_slice = False``) or "3d" (``by_slice = True``). - - gldzm_spatial_method: {"2d", "2.5d", "3d"}, optional - Determines how the distance zone matrices are formed and aggregated. One of the following: - - * "2d": features are computed from individual matrices and subsequently averaged [IBSI:8QNN]. - * "2.5d": all 2D matrices are merged and features are computed from this single matrix [IBSI:62GR]. - * "3d": features are computed from a single 3D matrix [IBSI:KOBO]. - - A list of values may be provided to extract features for multiple spatial methods. Default: "2d" - (``by_slice = False``) or "3d" (``by_slice = True``). - - ngtdm_spatial_method: {"2d", "2.5d", "3d"}, optional - Determines how the neighbourhood grey tone difference matrices are formed and aggregated. One of the - following: - - * "2d": features are computed from individual matrices and subsequently averaged [IBSI:8QNN]. - * "2.5d": all 2D matrices are merged and features are computed from this single matrix [IBSI:62GR]. - * "3d": features are computed from a single 3D matrix [IBSI:KOBO]. - - A list of values may be provided to extract features for multiple spatial methods. Default: "2d" - (``by_slice = False``) or "3d" (``by_slice = True``). - - ngldm_distance: float or list of float, optional, default: 1.0 - Distance (in voxels) for NGLDM for determining the neighbourhood. Chebyshev, or checkerboard, distance is - used. A value of 1.0 will therefore consider all (diagonally) adjacent voxels as its neighbourhood. A list of - values can be provided to compute NGLDM features at different scales. - - ngldm_difference_level: float or list of float, optional, default: 0.0 - Difference level (alpha) for NGLDM. Determines which bins are grouped together in the matrix. 
- - ngldm_spatial_method: {"2d", "2.5d", "3d"}, optional - Determines how the neighbourhood grey level dependence matrices are formed and aggregated. One of the - following: - - * "2d": features are computed from individual matrices and subsequently averaged [IBSI:8QNN]. - * "2.5d": all 2D matrices are merged and features are computed from this single matrix [IBSI:62GR]. - * "3d": features are computed from a single 3D matrix [IBSI:KOBO]. - - A list of values may be provided to extract features for multiple spatial methods. Default: "2d" - (``by_slice = False``) or "3d" (``by_slice = True``). - - **kwargs: dict, optional - Unused keyword arguments. - """ - - def __init__( - self, - by_slice: bool = False, - no_approximation: bool = False, - ibsi_compliant: bool = True, - base_feature_families: Union[None, str, List[str]] = "none", - base_discretisation_method: Union[None, str, List[str]] = None, - base_discretisation_n_bins: Union[None, int, List[int]] = None, - base_discretisation_bin_width: Union[None, float, List[float]] = None, - ivh_discretisation_method: str = "none", - ivh_discretisation_n_bins: Union[None, int] = 1000, - ivh_discretisation_bin_width: Union[None, float] = None, - glcm_distance: Union[float, List[float]] = 1.0, - glcm_spatial_method: Union[None, str, List[str]] = None, - glrlm_spatial_method: Union[None, str, List[str]] = None, - glszm_spatial_method: Union[None, str, List[str]] = None, - gldzm_spatial_method: Union[None, str, List[str]] = None, - ngtdm_spatial_method: Union[None, str, List[str]] = None, - ngldm_distance: Union[float, List[float]] = 1.0, - ngldm_difference_level: Union[float, List[float]] = 0.0, - ngldm_spatial_method: Union[None, str, List[str]] = None, - **kwargs - ): - # Set by slice. - self.by_slice: bool = by_slice - - # Set approximation flag. - self.no_approximation: bool = no_approximation - - # Set IBSI-compliance flag. - self.ibsi_compliant: bool = ibsi_compliant - - if base_feature_families is None: - base_feature_families = "none" - - # Check families. - if not isinstance(base_feature_families, list): - base_feature_families = [base_feature_families] - - # Check which entries are valid. - valid_families: List[bool] = [ii in self.get_available_families() for ii in base_feature_families] - - if not all(valid_families): - raise ValueError( - f"One or more families in the base_feature_families parameter were not recognised: " - f"{', '.join([base_feature_families[ii] for ii, is_valid in enumerate(valid_families) if not is_valid])}") - - # Set families. - self.families: List[str] = base_feature_families - - if not self.has_any_feature_family(): - self.families = ["none"] - - if self.has_discretised_family(): - # Check if discretisation_method is None. - if base_discretisation_method is None: - raise ValueError("The base_discretisation_method parameter has no default and must be set.") - - if not isinstance(base_discretisation_method, list): - base_discretisation_method = [base_discretisation_method] - - if not all(discretisation_method in [ - "fixed_bin_size", "fixed_bin_number", "fixed_bin_size_pyradiomics", "none" - ] for discretisation_method in base_discretisation_method): - raise ValueError( - "Available values for the base_discretisation_method parameter are " - "'fixed_bin_number', 'fixed_bin_size', 'fixed_bin_size_pyradiomics' and 'none'. 
" - "One or more values were not recognised.") - - # Check discretisation_n_bins - if "fixed_bin_number" in base_discretisation_method: - if base_discretisation_n_bins is None: - raise ValueError("The base_discretisation_n_bins parameter has no default and must be set") - - if not isinstance(base_discretisation_n_bins, list): - base_discretisation_n_bins = [base_discretisation_n_bins] - - if not all(isinstance(n_bins, int) for n_bins in base_discretisation_n_bins): - raise TypeError( - "The base_discretisation_n_bins parameter is expected to contain integers with " - "value 2 or larger. Found one or more values that were not integers.") - - if not all(n_bins >= 2 for n_bins in base_discretisation_n_bins): - raise ValueError( - "The base_discretisation_n_bins parameter is expected to contain integers with " - "value 2 or larger. Found one or more values that were less than 2.") - - else: - base_discretisation_n_bins = None - - # Check discretisation_bin_width - if "fixed_bin_size" in base_discretisation_method or "fixed_bin_size_pyradiomics" in base_discretisation_method: - if base_discretisation_bin_width is None: - raise ValueError( - "The base_discretisation_bin_width parameter has no default value and must be set.") - - if not isinstance(base_discretisation_bin_width, list): - base_discretisation_bin_width = [base_discretisation_bin_width] - - if not all(isinstance(bin_size, float) for bin_size in base_discretisation_bin_width): - raise TypeError( - "The base_discretisation_bin_width parameter is expected to contain floating " - "point values greater than 0.0. Found one or more values that were not floating " - "points.") - - if not all(bin_size > 0.0 for bin_size in base_discretisation_bin_width): - raise ValueError( - "The base_discretisation_bin_width parameter is expected to contain floating " - "point values greater than 0.0. Found one or more values that were 0.0 or less.") - - else: - base_discretisation_bin_width = None - - else: - base_discretisation_method = None - base_discretisation_n_bins = None - base_discretisation_bin_width = None - - # Set discretisation method-related parameters. - self.discretisation_method: Union[None, List[str]] = base_discretisation_method - self.discretisation_n_bins: Union[None, List[int]] = base_discretisation_n_bins - self.discretisation_bin_width: Union[None, List[float]] = base_discretisation_bin_width - - if self.has_ivh_family(): - if ivh_discretisation_method not in ["fixed_bin_size", "fixed_bin_number", "none"]: - raise ValueError( - "Available values for the ivh_discretisation_method parameter are 'fixed_bin_size', " - "'fixed_bin_number', and 'none'. One or more values were not recognised.") - - # Check discretisation_n_bins - if "fixed_bin_number" in ivh_discretisation_method: - - if not isinstance(ivh_discretisation_n_bins, int): - raise TypeError( - "The ivh_discretisation_n_bins parameter is expected to be an integer with " - "value 2 or greater. Found: a value that was not an integer.") - - if not ivh_discretisation_n_bins >= 2: - raise ValueError( - "The ivh_discretisation_n_bins parameter is expected to be an integer with " - f"value 2 or greater. Found: {ivh_discretisation_n_bins}") - - else: - ivh_discretisation_n_bins = None - - # Check discretisation_bin_width - if "fixed_bin_size" in ivh_discretisation_method: - - if not isinstance(ivh_discretisation_bin_width, float): - raise TypeError( - "The ivh_discretisation_bin_width parameter is expected to be a floating " - "point value greater than 0.0. 
Found a value that was not a floating point.") - - if not ivh_discretisation_bin_width > 0.0: - raise ValueError( - "The ivh_discretisation_bin_width parameter is expected to be a floating " - f"point value greater than 0.0. Found: {ivh_discretisation_bin_width}") - - else: - ivh_discretisation_bin_width = None - - else: - ivh_discretisation_method = None - ivh_discretisation_n_bins = None - ivh_discretisation_bin_width = None - - # Set parameters - self.ivh_discretisation_method: Union[None, str] = ivh_discretisation_method - self.ivh_discretisation_n_bins: Union[None, int] = ivh_discretisation_n_bins - self.ivh_discretisation_bin_width: Union[None, float] = ivh_discretisation_bin_width - - # Set GLCM attributes. - if self.has_glcm_family(): - # Check distance parameter. - if not isinstance(glcm_distance, list): - glcm_distance = [glcm_distance] - - if not all(isinstance(distance, float) for distance in glcm_distance): - raise TypeError( - "The glcm_distance parameter is expected to contain floating point values of 1.0 " - "or greater. Found one or more values that were not floating points.") - - if not all(distance >= 1.0 for distance in glcm_distance): - raise ValueError( - "The glcm_distance parameter is expected to contain floating point values of 1.0 " - "or greater. Found one or more values that were less than 1.0.") - - # Check spatial method. - glcm_spatial_method = self.check_valid_directional_spatial_method( - glcm_spatial_method, - "glcm_spatial_method") - - else: - glcm_distance = None - glcm_spatial_method = None - - self.glcm_distance: Union[None, List[float]] = glcm_distance - self.glcm_spatial_method: Union[None, List[str]] = glcm_spatial_method - - # Set GLRLM attributes. - if self.has_glrlm_family(): - # Check spatial method. - glrlm_spatial_method = self.check_valid_directional_spatial_method( - glrlm_spatial_method, "glrlm_spatial_method") - - else: - glrlm_spatial_method = None - - self.glrlm_spatial_method: Union[None, List[str]] = glrlm_spatial_method - - # Set GLSZM attributes. - if self.has_glszm_family(): - # Check spatial method. - glszm_spatial_method = self.check_valid_omnidirectional_spatial_method( - glszm_spatial_method, "glszm_spatial_method") - else: - glszm_spatial_method = None - - self.glszm_spatial_method: Union[None, List[str]] = glszm_spatial_method - - # Set GLDZM attributes. - if self.has_gldzm_family(): - # Check spatial method. - gldzm_spatial_method = self.check_valid_omnidirectional_spatial_method( - gldzm_spatial_method, "gldzm_spatial_method") - - else: - gldzm_spatial_method = None - - self.gldzm_spatial_method: Union[None, List[str]] = gldzm_spatial_method - - # Set NGTDM attributes. - if self.has_ngtdm_family(): - # Check spatial method - ngtdm_spatial_method = self.check_valid_omnidirectional_spatial_method( - ngtdm_spatial_method, "ngtdm_spatial_method") - - else: - ngtdm_spatial_method = None - - self.ngtdm_spatial_method: Union[None, List[str]] = ngtdm_spatial_method - - # Set NGLDM attributes - if self.has_ngldm_family(): - - # Check distance. - if not isinstance(ngldm_distance, list): - ngldm_distance = [ngldm_distance] - - if not all(isinstance(distance, float) for distance in ngldm_distance): - raise TypeError( - "The ngldm_distance parameter is expected to contain floating point values of 1.0 " - "or greater. 
Found one or more values that were not floating points.") - - if not all(distance >= 1.0 for distance in ngldm_distance): - raise ValueError( - "The ngldm_distance parameter is expected to contain floating point values of 1.0 " - "or greater. Found one or more values that were less than 1.0.") - - # Check spatial method - ngldm_spatial_method = self.check_valid_omnidirectional_spatial_method( - ngldm_spatial_method, "ngldm_spatial_method") - - # Check difference level. - if not isinstance(ngldm_difference_level, list): - ngldm_difference_level = [ngldm_difference_level] - - if not all(isinstance(difference, float) for difference in ngldm_difference_level): - raise TypeError( - "The ngldm_difference_level parameter is expected to contain floating point values of 0.0 " - "or greater. Found one or more values that were not floating points.") - - if not all(difference >= 0.0 for difference in ngldm_difference_level): - raise ValueError( - "The ngldm_difference_level parameter is expected to contain floating point values " - "of 0.0 or greater. Found one or more values that were less than 0.0.") - - else: - ngldm_spatial_method = None - ngldm_distance = None - ngldm_difference_level = None - - self.ngldm_dist: Union[None, List[float]] = ngldm_distance - self.ngldm_diff_lvl: Union[None, List[float]] = ngldm_difference_level - self.ngldm_spatial_method: Union[None, List[str]] = ngldm_spatial_method - - @staticmethod - def get_available_families(): - return [ - "mrp", "morph", "morphology", "morphological", "li", "loc.int", "loc_int", "local_int", "local_intensity", - "st", "stat", "stats", "statistics", "statistical", "ih", "int_hist", "int_histogram", "intensity_histogram", - "ivh", "int_vol_hist", "intensity_volume_histogram", "cm", "glcm", "grey_level_cooccurrence_matrix", - "cooccurrence_matrix", "rlm", "glrlm", "grey_level_run_length_matrix", "run_length_matrix", - "szm", "glszm", "grey_level_size_zone_matrix", "size_zone_matrix", "dzm", "gldzm", - "grey_level_distance_zone_matrix", "distance_zone_matrix", "tdm", "ngtdm", - "neighbourhood_grey_tone_difference_matrix", "grey_tone_difference_matrix", "ldm", "ngldm", - "neighbouring_grey_level_dependence_matrix", "grey_level_dependence_matrix", "all", "none" - ] - - def has_any_feature_family(self): - return not any(family == "none" for family in self.families) - - def has_discretised_family(self): - return self.has_ih_family() or self.has_glcm_family() or self.has_glrlm_family() or self.has_glszm_family() \ - or self.has_gldzm_family() or self.has_ngtdm_family() or self.has_ngldm_family() - - def has_morphology_family(self): - return any(family in ["mrp", "morph", "morphology", "morphological", "all"] for family in self.families) - - def has_local_intensity_family(self): - return any(family in ["li", "loc.int", "loc_int", "local_int", "local_intensity", "all"] for family in self.families) - - def has_stats_family(self): - return any(family in ["st", "stat", "stats", "statistics", "statistical", "all"] for family in self.families) - - def has_ih_family(self): - return any(family in ["ih", "int_hist", "int_histogram", "intensity_histogram", "all"] for family in self.families) - - def has_ivh_family(self): - return any(family in ["ivh", "int_vol_hist", "intensity_volume_histogram", "all"] for family in self.families) - - def has_glcm_family(self): - return any(family in ["cm", "glcm", "grey_level_cooccurrence_matrix", "cooccurrence_matrix", "all"] for family in self.families) - - def has_glrlm_family(self): - return any(family in ["rlm", "glrlm", 
"grey_level_run_length_matrix", "run_length_matrix", "all"] for family in self.families) - - def has_glszm_family(self): - return any(family in ["szm", "glszm", "grey_level_size_zone_matrix", "size_zone_matrix", "all"] for family in self.families) - - def has_gldzm_family(self): - return any(family in ["dzm", "gldzm", "grey_level_distance_zone_matrix", "distance_zone_matrix", "all"] for family in self.families) - - def has_ngtdm_family(self): - return any(family in ["tdm", "ngtdm", "neighbourhood_grey_tone_difference_matrix", "grey_tone_difference_matrix", "all"] for family in self.families) - - def has_ngldm_family(self): - return any(family in ["ldm", "ngldm", "neighbouring_grey_level_dependence_matrix", "grey_level_dependence_matrix", "all"] for family in self.families) - - def check_valid_directional_spatial_method(self, x, var_name): - - # Set defaults - if x is None and self.by_slice: - x = ["2d_slice_merge"] - - elif x is None and not self.by_slice: - x = ["3d_volume_merge"] - - # Check that x is a list. - if not isinstance(x, list): - x = [x] - - all_spatial_method = ["2d_average", "2d_slice_merge", "2.5d_direction_merge", "2.5d_volume_merge"] - if not self.by_slice: - all_spatial_method += ["3d_average", "3d_volume_merge"] - - # Check that x contains strings. - if not all(isinstance(spatial_method, str) for spatial_method in x): - raise TypeError( - f"The {var_name} parameter expects one or more of the following values: " - f"{', '.join(all_spatial_method)}. Found: one or more values that were not strings.") - - # Check spatial method. - valid_spatial_method = [spatial_method in all_spatial_method for spatial_method in x] - - if not all(valid_spatial_method): - raise ValueError( - f"The {var_name} parameter expects one or more of the following values: " - f"{', '.join(all_spatial_method)}. Found: " - f"{', '.join([spatial_method for spatial_method in x if spatial_method in all_spatial_method])}") - - return x - - def check_valid_omnidirectional_spatial_method(self, x, var_name): - - # Set defaults - if x is None and self.by_slice: - x = ["2d"] - - elif x is None and not self.by_slice: - x = ["3d"] - - # Check that x is a list. - if not isinstance(x, list): - x = [x] - - all_spatial_method = ["2d", "2.5d"] - if not self.by_slice: - all_spatial_method += ["3d"] - - # Check that x contains strings. - if not all(isinstance(spatial_method, str) for spatial_method in x): - raise TypeError( - f"The {var_name} parameter expects one or more of the following values: " - f"{', '.join(all_spatial_method)}. Found: one or more values that were not strings.") - - # Check spatial method. - valid_spatial_method = [spatial_method in all_spatial_method for spatial_method in x] - - if not all(valid_spatial_method): - raise ValueError( - f"The {var_name} parameter expects one or more of the following values: " - f"{', '.join(all_spatial_method)}. Found: " - f"{', '.join([spatial_method for spatial_method in x if spatial_method in all_spatial_method])}") - - return x
- - - -def get_feature_extraction_settings() -> list[dict[str, Any]]: - return [ - setting_def("ibsi_compliant", "bool", test=True), - setting_def( - "base_feature_families", "str", to_list=True, xml_key=["feature_families", "families"], - class_key="families", test=["all"] - ), - setting_def( - "base_discretisation_method", "str", to_list=True, xml_key=["discretisation_method", "discr_method"], - class_key="discretisation_method", test=["fixed_bin_size", "fixed_bin_number"] - ), - setting_def( - "base_discretisation_n_bins", "int", to_list=True, xml_key=["discretisation_n_bins", "discr_n_bins"], - class_key="discretisation_n_bins", test=[10, 33] - ), - setting_def( - "base_discretisation_bin_width", "float", to_list=True, - xml_key=["discretisation_bin_width", "discr_bin_width"], class_key="discretisation_bin_width", - test=[10.0, 34.0] - ), - setting_def( - "ivh_discretisation_method", "str", xml_key=["ivh_discretisation_method", "ivh_discr_method"], - class_key="ivh_discretisation_method", test="fixed_bin_size" - ), - setting_def( - "ivh_discretisation_n_bins", "int", xml_key=["ivh_discretisation_n_bins", "ivh_discr_n_bins"], - test=20 - ), - setting_def( - "ivh_discretisation_bin_width", "float", xml_key=["ivh_discretisation_bin_width", "ivh_discr_bin_width"], - test=30.0 - ), - setting_def("glcm_distance", "float", to_list=True, xml_key=["glcm_distance", "glcm_dist"], test=[2.0, 3.0]), - setting_def("glcm_spatial_method", "str", to_list=True, test=["2d_average", "2d_slice_merge"]), - setting_def("glrlm_spatial_method", "str", to_list=True, test=["2d_average", "2d_slice_merge"]), - setting_def("glszm_spatial_method", "str", to_list=True, test=["2d", "2.5d"]), - setting_def("gldzm_spatial_method", "str", to_list=True, test=["2d", "2.5d"]), - setting_def("ngtdm_spatial_method", "str", to_list=True, test=["2d", "2.5d"]), - setting_def( - "ngldm_distance", "float", to_list=True, xml_key=["ngldm_distance", "ngldm_dist"], - class_key="ngldm_dist", test=[2.5, 3.5] - ), - setting_def( - "ngldm_difference_level", "float", to_list=True, xml_key=["ngldm_difference_level", "ngldm_diff_lvl"], - class_key="ngldm_diff_lvl", test=[1.0, 1.9] - ), - setting_def("ngldm_spatial_method", "str", to_list=True, test=["2d", "2.5d"]) - ] -
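To make the conditional parameters above concrete, the sketch below constructs the settings object directly. The chosen families, discretisation method and bin count are illustrative values rather than defaults, and the import path follows the (older) module shown on this page; the updated documentation lists the same content under mirp.settings.feature_parameters.

```python
# Illustrative sketch: statistical, GLCM and GLRLM features from a 3D volume,
# with intensities discretised into 32 bins. Values are examples only.
from mirp.settings.settingsFeatureExtraction import FeatureExtractionSettingsClass

feature_settings = FeatureExtractionSettingsClass(
    by_slice=False,
    base_feature_families=["statistics", "glcm", "glrlm"],
    base_discretisation_method="fixed_bin_number",
    base_discretisation_n_bins=32,
    glcm_spatial_method="3d_volume_merge",  # glrlm_spatial_method defaults to the same value
)
```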
- - - - \ No newline at end of file diff --git a/docs/_modules/mirp/settings/settingsGeneral.html b/docs/_modules/mirp/settings/settingsGeneral.html deleted file mode 100644 index a2c35ea3..00000000 --- a/docs/_modules/mirp/settings/settingsGeneral.html +++ /dev/null @@ -1,195 +0,0 @@ - - - - - - mirp.settings.settingsGeneral — mirp 2.1.0 documentation - - - - - - - - - - - - - - - -

Source code for mirp.settings.settingsGeneral

-import warnings
-from dataclasses import dataclass
-from typing import Any
-from mirp.settings.utilities import setting_def
-
-
-
-[docs] -@dataclass -class GeneralSettingsClass: - """ - Set of overall process parameters. The most important parameter here is ``by_slice`` which affects how images are - processed and features are computed. - - Parameters - ---------- - by_slice: bool, optional, default: False - Defines whether image processing and computations should be performed in 2D (True) or 3D (False). - - mask_merge: bool, optional, default: False - Defines whether multiple mask objects should be combined into a single mask. - - mask_split: bool, optional, default: False - Defines whether a mask that contains multiple regions should be split into separate mask objects. - - mask_select_largest_region: bool, optional, default: False - Defines whether the largest region within a mask object should be selected. For example, in a mask that - contains multiple separate lesions. ``mask_select_largest_region = True`` will remove all but the largest - lesion. - - mask_select_largest_slice: bool, optional, default: False - Defines whether the largest slice within a mask object should be selected. - - config_str: str, optional - Sets a configuration string, which can be used to differentiate results obtained using other settings. - - no_approximation: bool, optional, default: False - Disables approximation within MIRP. This currently only affects computation of features such as Geary's - c-measure. Can be True or False (default). False means that approximation is performed. - - **kwargs: dict, optional - Unused keyword arguments. - """ - - def __init__( - self, - by_slice: bool = False, - mask_merge: bool = False, - mask_split: bool = False, - mask_select_largest_region: bool = False, - mask_select_largest_slice: bool = False, - config_str: str = "", - no_approximation: bool = False, - **kwargs): - - if not isinstance(by_slice, bool): - raise ValueError("The by_slice parameter should be a boolean.") - - # Set by_slice and select_slice parameters. - self.by_slice: bool = by_slice - - self.mask_merge = mask_merge - self.mask_split = mask_split - self.mask_select_largest_region = mask_select_largest_region - - if mask_select_largest_slice and not by_slice: - warnings.warn("A 2D approach is used as the largest slice is selected.", UserWarning) - self.by_slice = True - - self.mask_select_largest_slice = mask_select_largest_slice - - # Set configuration string. - self.config_str: str = config_str - - # Set approximation of features. - self.no_approximation: bool = no_approximation
-
-
-
-def get_general_settings() -> list[dict[str, Any]]:
-    return [
-        setting_def("by_slice", "bool", test=True),
-        setting_def("mask_merge", "bool", test=True),
-        setting_def("mask_split", "bool", test=True),
-        setting_def("mask_select_largest_region", "bool", test=True),
-        setting_def("mask_select_largest_slice", "bool", test=True),
-        setting_def("config_str", "str", test="test_config"),
-        setting_def("no_approximation", "bool", test=True)
-    ]
-
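A short, hedged sketch of how these general settings might be instantiated; the values are illustrative, and the import path follows the module shown on this page (the updated documentation lists it under mirp.settings.general_parameters).

```python
# Minimal sketch with illustrative values: process in 3D and keep only the
# largest connected region within each mask.
from mirp.settings.settingsGeneral import GeneralSettingsClass

general_settings = GeneralSettingsClass(
    by_slice=False,
    mask_select_largest_region=True,
    no_approximation=False,
)
```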
- - - - \ No newline at end of file diff --git a/docs/_modules/mirp/settings/settingsGeneric.html b/docs/_modules/mirp/settings/settingsGeneric.html deleted file mode 100644 index bf005a13..00000000 --- a/docs/_modules/mirp/settings/settingsGeneric.html +++ /dev/null @@ -1,270 +0,0 @@ - - - - - - mirp.settings.settingsGeneric — mirp 2.1.0 documentation - - - - - - - - - - - - - - - -

Source code for mirp.settings.settingsGeneric

-import copy
-
-from mirp.settings.settingsFeatureExtraction import FeatureExtractionSettingsClass
-from mirp.settings.settingsGeneral import GeneralSettingsClass
-from mirp.settings.settingsImageProcessing import ImagePostProcessingClass
-from mirp.settings.settingsImageTransformation import ImageTransformationSettingsClass
-from mirp.settings.settingsInterpolation import ImageInterpolationSettingsClass, MaskInterpolationSettingsClass
-from mirp.settings.settingsMaskResegmentation import ResegmentationSettingsClass
-from mirp.settings.settingsPerturbation import ImagePerturbationSettingsClass
-
-
-
-[docs] -class SettingsClass: - """ - Container for objects used to configure the image processing and feature processing workflow. This object can be - initialised in two ways: - - * By providing (already initialised) configuration objects as arguments. - * By passing arguments to configuration objects as keyword arguments. These configuration objects will then be - created while initialising this container. - - Parameters - ---------- - general_settings: GeneralSettingsClass, optional - Configuration object for parameters related to the general process. See - :class:`~mirp.settings.settingsGeneral.GeneralSettingsClass`. - - post_process_settings: ImagePostProcessingClass, optional - Configuration object for parameters related to image (post-)processing. See - :class:`~mirp.settings.settingsImageProcessing.ImagePostProcessingClass`. - - perturbation_settings: ImagePerturbationSettingsClass, optional - Configuration object for parameters related to image perturbation / augmentation. See - :class:`~mirp.settings.settingPerturbation.ImagePerturbationSettingsClass`. - - img_interpolate_settings: ImageInterpolationSettingsClass, optional - Configuration object for parameters related to image resampling. See - :class:`~mirp.settings.settingsInterpolation.ImageInterpolationSettingsClass`. - - roi_interpolate_settings: MaskInterpolationSettingsClass, optional - Configuration object for parameters related to mask resampling. See - :class:`~mirp.settings.settingsInterpolation.MaskInterpolationSettingsClass`. - - roi_resegment_settings: ResegmentationSettingsClass, optional - Configuration object for parameters related to mask resegmentation. See - :class:`~mirp.settings.settingsMaskResegmentation.ResegmentationSettingsClass`. - - feature_extr_settings: FeatureExtractionSettingsClass, optional - Configuration object for parameters related to feature computation. See - :class:`~mirp.settings.settingsFeatureExtraction.FeatureExtractionSettingsClass`. - - img_transform_settings: ImageTransformationSettingsClass, optional - Configuration object for parameters related to image transformation. See - :class:`~mirp.settings.settingsImageTransformation.ImageTransformationSettingsClass`. - - **kwargs: dict, optional - Keyword arguments for initialising configuration objects stored in this container object. 
- - See Also - -------- - - * general settings (:class:`~mirp.settings.settingsGeneral.GeneralSettingsClass`) - * image post-processing (:class:`~mirp.settings.settingsImageProcessing.ImagePostProcessingClass`) - * image perturbation / augmentation (:class:`~mirp.settings.settingPerturbation.ImagePerturbationSettingsClass`) - * image interpolation / resampling (:class:`~mirp.settings.settingsInterpolation.ImageInterpolationSettingsClass` - and :class:`~mirp.settings.settingsInterpolation.MaskInterpolationSettingsClass`) - * mask resegmentation (:class:`~mirp.settings.settingsMaskResegmentation.ResegmentationSettingsClass`) - * image transformation (:class:`~mirp.settings.settingsImageTransformation.ImageTransformationSettingsClass`) - * feature computation / extraction ( - :class:`~mirp.settings.settingsFeatureExtraction.FeatureExtractionSettingsClass`) - - """ - def __init__( - self, - general_settings: None | GeneralSettingsClass = None, - post_process_settings: None | ImagePostProcessingClass = None, - perturbation_settings: None | ImagePerturbationSettingsClass = None, - img_interpolate_settings: None | ImageInterpolationSettingsClass = None, - roi_interpolate_settings: None | MaskInterpolationSettingsClass = None, - roi_resegment_settings: None | ResegmentationSettingsClass = None, - feature_extr_settings: None | FeatureExtractionSettingsClass = None, - img_transform_settings: None | ImageTransformationSettingsClass = None, - **kwargs - ): - kwargs = copy.deepcopy(kwargs) - - # General settings. - if general_settings is None: - general_settings = GeneralSettingsClass(**kwargs) - self.general = general_settings - - # Remove by_slice and no_approximation from the keyword arguments to avoid double passing. - kwargs.pop("by_slice", None) - kwargs.pop("no_approximation", None) - - # Image interpolation settings. - if img_interpolate_settings is None: - img_interpolate_settings = ImageInterpolationSettingsClass( - by_slice=general_settings.by_slice, - **kwargs - ) - self.img_interpolate = img_interpolate_settings - - # Mask interpolation settings. - if roi_interpolate_settings is None: - roi_interpolate_settings = MaskInterpolationSettingsClass(**kwargs) - self.roi_interpolate = roi_interpolate_settings - - # Image (post-)processing settings. - if post_process_settings is None: - post_process_settings = ImagePostProcessingClass(**kwargs) - self.post_process = post_process_settings - - # Image perturbation settings. - if perturbation_settings is None: - perturbation_settings = ImagePerturbationSettingsClass(**kwargs) - self.perturbation = perturbation_settings - - # Mask resegmentation settings. - if roi_resegment_settings is None: - roi_resegment_settings = ResegmentationSettingsClass(**kwargs) - self.roi_resegment = roi_resegment_settings - - # Feature extraction settings. 
- if feature_extr_settings is None: - feature_extr_settings = FeatureExtractionSettingsClass( - by_slice=general_settings.by_slice, - no_approximation=general_settings.no_approximation, - **kwargs - ) - self.feature_extr = feature_extr_settings - - # Image transformation settings - if img_transform_settings is None: - img_transform_settings = ImageTransformationSettingsClass( - by_slice=general_settings.by_slice, - **kwargs - ) - self.img_transform = img_transform_settings - - def __eq__(self, other): - if self.__class__ != other.__class__: - return False - - if self.general != other.general: - return False - if self.img_interpolate != other.img_interpolate: - return False - if self.roi_interpolate != other.roi_interpolate: - return False - if self.post_process != other.post_process: - return False - if self.perturbation != other.perturbation: - return False - if self.roi_resegment != other.roi_resegment: - return False - if self.feature_extr != other.feature_extr: - return False - if self.img_transform != other.img_transform: - return False - - return True
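The container supports the two initialisation routes described in its docstring. The sketch below assumes that the nested configuration classes provide defaults for any parameters that are not forwarded; it is illustrative rather than a verified recipe.

```python
# Sketch of the two initialisation routes for SettingsClass (illustrative values).
from mirp.settings.settingsGeneric import SettingsClass
from mirp.settings.settingsGeneral import GeneralSettingsClass

# 1. Provide already-initialised configuration objects.
settings_a = SettingsClass(general_settings=GeneralSettingsClass(by_slice=True))

# 2. Forward keyword arguments; the container creates the configuration objects.
settings_b = SettingsClass(by_slice=True)
```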
- - - - \ No newline at end of file diff --git a/docs/_modules/mirp/settings/settingsImageProcessing.html b/docs/_modules/mirp/settings/settingsImageProcessing.html deleted file mode 100644 index 83510015..00000000 --- a/docs/_modules/mirp/settings/settingsImageProcessing.html +++ /dev/null @@ -1,449 +0,0 @@ - - - - - - mirp.settings.settingsImageProcessing — mirp 2.1.0 documentation - - - - - - - - - - - - - - - -

Source code for mirp.settings.settingsImageProcessing

-import numpy as np
-
-from typing import Union, List, Tuple, Any
-from dataclasses import dataclass
-from mirp.settings.utilities import setting_def
-
-
-
-[docs] -@dataclass -class ImagePostProcessingClass: - """ - Parameters related to image processing. Note that parameters concerning image perturbation / augmentation and - resampling are set separately, see :class:`~mirp.settings.settingPerturbation.ImagePerturbationSettingsClass` and - :class:`~mirp.settings.settingsInterpolation.ImageInterpolationSettingsClass`. - - Parameters - ---------- - bias_field_correction: bool, optional, default: False - Determines whether N4 bias field correction should be performed. When a tissue mask is present, bias field - correction is conducted using the information contained within the mask. Bias-field correction can only be - applied to MR imaging. - - bias_field_correction_n_fitting_levels: int, optional, default: 1 - The number of fitting levels for the N4 bias field correction algorithm. - - bias_field_correction_n_max_iterations: int or list of int, optional, default: 50 - The number of fitting iterations for the N4 bias field algorithm. A single integer, or a list of integers - with a length equal to the number of fitting levels is expected. - - bias_field_convergence_threshold: float, optional, default: 0.001 - Convergence threshold for N4 bias field correction algorithm. - - intensity_normalisation: {"none", "range", "relative_range", "quantile_range", "standardisation"}, default: "none" - Specifies the algorithm used to normalise intensities in the image. Will use only intensities in voxels - masked by the tissue mask (of present). The following are possible: - - * "none": no normalisation - * "range": normalises intensities based on a fixed mapping against the ``intensity_normalisation_range`` - parameter, which is interpreted to represent an intensity range. - * "relative_range": normalises intensities based on a fixed mapping against the ``intensity_normalisation_range`` - parameter, which is interpreted to represent a relative intensity range. - * "quantile_range": normalises intensities based on a fixed mapping against the - ``intensity_normalisation_range`` parameter, which is interpreted to represent a quantile range. - * "standardisation": normalises intensities by subtraction of the mean intensity and division by the standard - deviation of intensities. - - .. note:: - intensity normalisation may remove any physical meaning of intensity units. - - intensity_normalisation_range: list of float, optional - Required for "range", "relative_range", and "quantile_range" intensity normalisation methods, and defines the - intensities that are mapped to the [0.0, 1.0] range during normalisation. The default range depends on the - type of normalisation method: - - * "range": [np.nan, np.nan]: the minimum and maximum intensity value present in the image are used to set the - mapping range. - * "relative_range": [0.0. 1.0]: the minimum (0.0) and maximum (1.0) intensity value present in the image are - used to set the mapping range. - * "quantile_range": [0.025, 0.975] the 2.5th and 97.5th percentiles of the intensities in the image are used - to set the mapping range. - - The lower end of the range is mapped to 0.0 and the upper end to 1.0. However, if intensities below the lower - end or above the upper end are present in the image, values below 0.0 or above 1.0 may be encountered after - normalisation. Use ``intensity_normalisation_saturation`` to cap intensities after normalisation to a - specific range. 
- - intensity_normalisation_saturation: list of float, optional, default: [np.nan, np.nan] - Defines the start and endpoint for the saturation range. Normalised intensities that lie outside this - range are mapped to the limits of the saturation range, e.g. with a range of [0.0, 0.8] all values greater - than 0.8 are assigned a value of 0.8. np.nan can be used to define limits where the intensity values should - not be saturated. - - tissue_mask_type: {"none", "range", "relative_range"}, optional, default: "relative_range" - Type of algorithm used to produce an approximate tissue mask of the tissue. Such masks can be used to select - pixels for bias correction and intensity normalisation by excluding non-tissue voxels. - - tissue_mask_range: list of float, optional - Range values for creating an approximate mask of the tissue. Required for "range" and "relative_range" - options. Default: [0.02, 1.00] (``"relative_range"``); [np.nan, np.nan] (``"range"``; effectively all voxels - are considered to represent tissue). - - **kwargs: - Unused keyword arguments. - """ - - def __init__( - self, - bias_field_correction: bool = False, - bias_field_correction_n_fitting_levels: int = 1, - bias_field_correction_n_max_iterations: Union[int, List[int], None] = None, - bias_field_convergence_threshold: float = 0.001, - intensity_normalisation: str = "none", - intensity_normalisation_range: Union[List[float], None] = None, - intensity_normalisation_saturation: Union[List[float], None] = None, - tissue_mask_type: str = "relative_range", - tissue_mask_range: Union[List[float], None] = None, - **kwargs - ): - - # Set bias_field_correction parameter - self.bias_field_correction = bias_field_correction - - # Check n_fitting_levels. - if bias_field_correction: - if not isinstance(bias_field_correction_n_fitting_levels, int): - raise TypeError("The bias_field_correction_n_fitting_levels should be integer with value 1 or larger.") - - if bias_field_correction_n_fitting_levels < 1: - raise ValueError( - f"The bias_field_correction_n_fitting_levels should be integer with value 1 or larger. " - f"Found: {bias_field_correction_n_fitting_levels}") - - else: - bias_field_correction_n_fitting_levels = None - - # Set n_fitting_levels. - self.n_fitting_levels: Union[None, int] = bias_field_correction_n_fitting_levels - - # Set default value for bias_field_correction_n_max_iterations. This is the number of iterations per fitting - # level. - if bias_field_correction_n_max_iterations is None and bias_field_correction: - bias_field_correction_n_max_iterations = [50 for ii in range(bias_field_correction_n_fitting_levels)] - - if bias_field_correction: - # Parse to list, if a single value is provided. - if not isinstance(bias_field_correction_n_max_iterations, list): - bias_field_correction_n_max_iterations = [bias_field_correction_n_max_iterations] - - # Ensure that the list of maximum iteration values equals the number of fitting levels. - if bias_field_correction_n_fitting_levels > 1 and len(bias_field_correction_n_max_iterations) == 1: - bias_field_correction_n_max_iterations = [ - bias_field_correction_n_max_iterations[0] - for ii in range(bias_field_correction_n_fitting_levels) - ] - - # Check that the list of maximum iteration values is equal to the number of fitting levels. 
- if len(bias_field_correction_n_max_iterations) != bias_field_correction_n_fitting_levels: - raise ValueError( - f"The bias_field_correction_n_max_iterations parameter should be a list with a length equal to the" - f" number of fitting levels ({bias_field_correction_n_fitting_levels}). Found list with " - f"{len(bias_field_correction_n_max_iterations)} values.") - - # Check that all values are integers. - if not all(isinstance(ii, int) for ii in bias_field_correction_n_max_iterations): - raise TypeError( - f"The bias_field_correction_n_max_iterations parameter should be a list of positive " - f"integer values. At least one value was not an integer.") - - # Check that all values are positive. - if not all([ii > 0 for ii in bias_field_correction_n_max_iterations]): - raise ValueError( - f"The bias_field_correction_n_max_iterations parameter should be a list of positive " - f"integer values. At least one value was zero or negative.") - - else: - bias_field_correction_n_max_iterations = None - - # Set n_max_iterations attribute. - self.n_max_iterations: Union[List[int], None] = bias_field_correction_n_max_iterations - - # Check that the convergence threshold is a non-negative number. - if bias_field_correction: - - # Check that the value is a float. - if not isinstance(bias_field_convergence_threshold, float): - raise TypeError( - f"The bias_field_convergence_threshold parameter is expected to be a non-negative " - f"floating point value. Found: a value that was not a floating point value.") - - if bias_field_convergence_threshold <= 0.0: - raise TypeError( - f"The bias_field_convergence_threshold parameter is expected to be a non-positive floating point " - f"value. Found: a value that was 0.0 or negative ({bias_field_convergence_threshold}).") - - else: - bias_field_convergence_threshold = None - - # Set convergence_threshold attribute. - self.convergence_threshold: Union[None, float] = bias_field_convergence_threshold - - # Check that intensity_normalisation has the correct values. - if intensity_normalisation not in ["none", "range", "relative_range", "quantile_range", "standardisation"]: - raise ValueError( - f"The intensity_normalisation parameter is expected to have one of the following values: " - f"'none', 'range', 'relative_range', 'quantile_range', 'standardisation'. Found: " - f"{intensity_normalisation}.") - - # Set intensity_normalisation parameter. - self.intensity_normalisation = intensity_normalisation - - # Set default value. - if intensity_normalisation_range is None: - if intensity_normalisation == "range": - # Cannot define a proper range. - intensity_normalisation_range = [np.nan, np.nan] - - elif intensity_normalisation == "relative_range": - intensity_normalisation_range = [0.0, 1.0] - - elif intensity_normalisation == "quantile_range": - intensity_normalisation_range = [0.025, 0.975] - - if intensity_normalisation == "range": - # Check that the range has length 2 and contains floating point values. - if not isinstance(intensity_normalisation_range, list): - raise TypeError( - f"The intensity_normalisation_range parameter for range-based normalisation should " - f"be a list with exactly two values, which are mapped to 0.0 and 1.0 respectively. " - f"Found: an object that is not a list.") - - if len(intensity_normalisation_range) != 2: - raise ValueError( - f"The intensity_normalisation_range parameter for range-based normalisation should " - f"be a list with exactly two values, which are mapped to 0.0 and 1.0 respectively. 
" - f"Found: list with {len(intensity_normalisation_range)} values.") - - if not all(isinstance(ii, float) for ii in intensity_normalisation_range): - raise TypeError( - f"The intensity_normalisation_range parameter for range-based normalisation should " - f"be a list with exactly two floating point values, which are mapped to 0.0 and 1.0 " - f"respectively. Found: one or more values that are not floating point values.") - - elif intensity_normalisation in ["relative_range", "quantile_range"]: - # Check that the range has length 2 and contains floating point values between 0.0 and 1.0. - if intensity_normalisation == "relative_range": - intensity_normalisation_specifier = "relative range-based normalisation" - else: - intensity_normalisation_specifier = "quantile range-based normalisation" - - if not isinstance(intensity_normalisation_range, list): - raise TypeError( - f"The intensity_normalisation_range parameter for {intensity_normalisation_specifier} " - f"should be a list with exactly two values, which are mapped to 0.0 and 1.0 " - f"respectively. Found: an object that is not a list.") - - if len(intensity_normalisation_range) != 2: - raise ValueError( - f"The intensity_normalisation_range parameter for {intensity_normalisation_specifier} " - f"should be a list with exactly two values, which are mapped to 0.0 and 1.0 " - f"respectively. Found: list with {len(intensity_normalisation_range)} values.") - - if not all(isinstance(ii, float) for ii in intensity_normalisation_range): - raise TypeError( - f"The intensity_normalisation_range parameter for {intensity_normalisation_specifier} " - f"should be a list with exactly two values, which are mapped to 0.0 and 1.0 " - f"respectively. Found: one or more values that are not floating point values.") - - if not all([0.0 <= ii <= 1.0 for ii in intensity_normalisation_range]): - raise TypeError( - f"The intensity_normalisation_range parameter for {intensity_normalisation_specifier} " - f"should be a list with exactly two values, which are mapped to 0.0 and 1.0 " - f"respectively. Found: one or more values that are outside the [0.0, 1.0] range.") - - else: - # None and standardisation do not use this range. - intensity_normalisation_range = None - - # Set normalisation range. - self.intensity_normalisation_range: Union[None, List[float]] = intensity_normalisation_range - - # Check intensity normalisation saturation range. - if intensity_normalisation_saturation is None: - intensity_normalisation_saturation = [np.nan, np.nan] - - if not isinstance(intensity_normalisation_saturation, list): - raise TypeError("The tissue_mask_range parameter is expected to be a list of two floating point values.") - - if not len(intensity_normalisation_saturation) == 2: - raise ValueError( - f"The tissue_mask_range parameter should consist of two values. Found: " - f"{len(intensity_normalisation_saturation)} values.") - - if not all(isinstance(ii, float) for ii in intensity_normalisation_saturation): - raise TypeError("The tissue_mask_range parameter can only contain floating point or np.nan values.") - - # intensity_normalisation_saturation parameter - self.intensity_normalisation_saturation: Union[None, List[float]] = intensity_normalisation_saturation - - # Check tissue_mask_type - if tissue_mask_type not in ["none", "range", "relative_range"]: - raise ValueError( - f"The tissue_mask_type parameter is expected to have one of the following values: " - f"'none', 'range', or 'relative_range'. 
Found: {tissue_mask_type}.") - - # Set tissue_mask_type - self.tissue_mask_type: str = tissue_mask_type - - # Set the default value for tissue_mask_range. - if tissue_mask_range is None: - if tissue_mask_type == "relative_range": - tissue_mask_range = [0.02, 1.00] - elif tissue_mask_type == "range": - tissue_mask_range = [np.nan, np.nan] - else: - tissue_mask_range = [np.nan, np.nan] - - # Perform checks on tissue_mask_range. - if tissue_mask_type != "none": - if not isinstance(tissue_mask_range, list): - raise TypeError( - "The tissue_mask_range parameter is expected to be a list of two floating point values.") - - if not len(tissue_mask_range) == 2: - raise ValueError( - f"The tissue_mask_range parameter should consist of two values. Found: " - f"{len(tissue_mask_range)} values.") - - if not all(isinstance(ii, float) for ii in tissue_mask_range): - raise TypeError("The tissue_mask_range parameter can only contain floating point or np.nan values.") - - if tissue_mask_type == "relative_range": - if not all([(0.0 <= ii <= 1.0) or np.isnan(ii) for ii in tissue_mask_range]): - raise ValueError( - "The tissue_mask_range parameter should consist of two values between 0.0 and 1.0.") - - # Set tissue_mask_range. - self.tissue_mask_range: Tuple[float, ...] = tuple(tissue_mask_range)
- - - -def get_post_processing_settings() -> list[dict[str, Any]]: - - return [ - setting_def("bias_field_correction", "bool", test=True), - setting_def( - "bias_field_correction_n_fitting_levels", "int", xml_key="n_fitting_levels", - class_key="n_fitting_levels", test=2 - ), - setting_def( - "bias_field_correction_n_max_iterations", "int", xml_key="n_max_iterations", - class_key="n_max_iterations", to_list=True, test=[1000, 1000] - ), - setting_def( - "bias_field_convergence_threshold", "float", xml_key="convergence_threshold", - class_key="convergence_threshold", test=0.1 - ), - setting_def("intensity_normalisation", "str", test="relative_range"), - setting_def("intensity_normalisation_range", "float", to_list=True, test=[0.10, 0.90]), - setting_def("intensity_normalisation_saturation", "float", to_list=True, test=[0.00, 10.00]), - setting_def("tissue_mask_type", "str", test="range"), - setting_def("tissue_mask_range", "float", to_list=True, test=[0.00, 10.00]) - ] -
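For orientation, the post-processing parameters documented above are normally supplied as keyword arguments to MIRP's high-level functions rather than by instantiating the settings class directly. The following is a minimal, hypothetical sketch: it assumes that `extract_features` from the `mirp` package accepts these keyword arguments as described in the docstring above, and the file paths are placeholders.

```python
from mirp import extract_features

# Hypothetical sketch: N4 bias field correction followed by relative-range intensity
# normalisation for an MR image, with a relative-range tissue mask. Paths are
# placeholders; parameter names follow the docstring above.
feature_data = extract_features(
    image="path/to/mr_image.nii.gz",
    mask="path/to/mask.nii.gz",
    bias_field_correction=True,
    bias_field_correction_n_fitting_levels=3,
    bias_field_correction_n_max_iterations=100,  # a single value is reused for every fitting level
    intensity_normalisation="relative_range",
    intensity_normalisation_range=[0.0, 1.0],
    intensity_normalisation_saturation=[0.0, 0.8],  # saturate normalised intensities to [0.0, 0.8]
    tissue_mask_type="relative_range",
    tissue_mask_range=[0.02, 1.00],
)
```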
\ No newline at end of file diff --git a/docs/_modules/mirp/settings/settingsImageTransformation.html b/docs/_modules/mirp/settings/settingsImageTransformation.html deleted file mode 100644 index 363fff62..00000000 --- a/docs/_modules/mirp/settings/settingsImageTransformation.html +++ /dev/null @@ -1,1312 +0,0 @@
- mirp.settings.settingsImageTransformation — mirp 2.1.0 documentation
Source code for mirp.settings.settingsImageTransformation

-import copy
-from typing import Union, List, Any
-from dataclasses import dataclass
-
-import numpy as np
-
-from mirp.settings.settingsFeatureExtraction import FeatureExtractionSettingsClass
-from mirp.settings.utilities import setting_def
-
-
-
-
-[docs] -@dataclass -class ImageTransformationSettingsClass: - """ - Parameters related to image transformation using filters. Many parameters are conditional on the selected image - filter (``filter_kernels``). By default, only statistical features are computed from filtered images. - - .. note:: - Many feature extraction parameters are copied from - :class:`~mirp.settings.settingsFeatureExtraction.FeatureExtractionSettingsClass`, except - ``response_map_feature_families``, ``response_map_discretisation_method`` and - ``response_map_discretisation_n_bins``. If other parameters need to be changed from their default settings, - first create an object of the current class ( - :class:`~mirp.settings.settingsImageTransformation.ImageTransformationSettingsClass`), and then update the - attributes. - - Parameters - ---------- - by_slice: str or bool, optional, default: False - Defines whether calculations should be performed in 2D (True) or 3D (False), or alternatively only in the - largest slice ("largest"). See :class:`~mirp.settings.settingsGeneral.GeneralSettingsClass`. - - response_map_feature_families: str or list of str, optional, default: "statistics" - Determines the feature families for which features are computed from response maps (filtered images). Radiomics - features are implemented as defined in the IBSI reference manual. The following feature families can be - computed from response maps: - - * Local intensity features: "li", "loc.int", "loc_int", "local_int", and "local_intensity". - * Intensity-based statistical features: "st", "stat", "stats", "statistics", and "statistical". - * Intensity histogram features: "ih", "int_hist", "int_histogram", and "intensity_histogram". - * Intensity-volume histogram features: "ivh", "int_vol_hist", and "intensity_volume_histogram". - * Grey level co-occurrence matrix (GLCM) features: "cm", "glcm", "grey_level_cooccurrence_matrix", - and "cooccurrence_matrix". - * Grey level run length matrix (GLRLM) features: "rlm", "glrlm", "grey_level_run_length_matrix", and - "run_length_matrix". - * Grey level size zone matrix (GLSZM) features: "szm", "glszm", "grey_level_size_zone_matrix", and - "size_zone_matrix". - * Grey level distance zone matrix (GLDZM) features: "dzm", "gldzm", "grey_level_distance_zone_matrix", and - "distance_zone_matrix". - * Neighbourhood grey tone difference matrix (NGTDM) features: "tdm", "ngtdm", - "neighbourhood_grey_tone_difference_matrix", and "grey_tone_difference_matrix". - * Neighbouring grey level dependence matrix (NGLDM) features: "ldm", "ngldm", - "neighbouring_grey_level_dependence_matrix", and "grey_level_dependence_matrix". - - In addition, the following tags can be used: - - * "none": no features are computed. - * "all": all features are computed. - - A list of tags may be provided to select multiple feature families. Morphological features are not computed - from response maps, because these are mask-based and are invariant to filtering. - - response_map_discretisation_method: {"fixed_bin_number", "fixed_bin_size", "fixed_bin_size_pyradiomics", "none"}, optional, default: "fixed_bin_number" - Method used for discretising intensities. Used to compute intensity histogram as well as texture features. - The setting is ignored if none of these feature families are being computed. The following options are - available: - - * "fixed_bin_number": The intensity range within the mask is divided into a fixed number of bins, - defined by the ``response_map_discretisation_n_bins`` parameter. 
- * "fixed_bin_size": The intensity range is divided into bins with a fixed width, defined using the - ``base_discretisation_bin_width`` parameter. The lower bound of the range is determined from the lower - bound of the mask resegmentation range, see the ``resegmentation_intensity_range`` in - :class:`~mirp.settings.settingsMaskResegmentation.ResegmentationSettingsClass`. Other images, - including MRI, normalised CT and PET images and filtered images, do not have a default value, and bins are - created from using the minimum intensity as lower bound. - * "fixed_bin_size_pyradiomics": The intensity range is divided into bins with a fixed width. This follows the - non-IBSI compliant implementation in the pyradiomics package. - * "none": The intensity range is not discretised into bins. This method can only be used if the image - intensities are integer and strictly positive. - - Multiple discretisation methods can be specified as a list to yield features according to each method. - - .. note:: - Use of the "fixed_bin_size", "fixed_bin_size_pyradiomics", and "none" discretisation methods is discouraged - for transformed images. Due to transformation, a direct link to any meaningful quantity represented by the - intensity of the original image (e.g. Hounsfield Units for CT, Standardised Uptake Value for PET) is lost. - - response_map_discretisation_n_bins: int or list of int, optional, default: 16 - Number of bins used for the "fixed_bin_number" discretisation method. Multiple values can be specified in a - list to yield features according to each number of bins. - - response_map_discretisation_bin_width: float or list of float, optional - Width of each bin in the "fixed_bin_size" and "fixed_bin_size_pyradiomics" discretisation methods. Multiple - values can be specified in a list to yield features according to each bin width. - - filter_kernels: str or list of str, optional, default: None - Names of the filters applied to the original image to create response maps (filtered images). Filter - implementation follows the IBSI reference manual. The following filters are supported: - - * Mean filters: "mean" - * Gaussian filters: "gaussian", "riesz_gaussian", and "riesz_steered_gaussian" - * Laplacian-of-Gaussian filters: "laplacian_of_gaussian", "log", "riesz_laplacian_of_gaussian", - "riesz_log", "riesz_steered_laplacian_of_gaussian", and "riesz_steered_log". - * Laws kernels: "laws" - * Gabor kernels: "gabor", "riesz_gabor", and "riesz_steered_gabor" - * Separable wavelets: "separable_wavelet" - * Non-separable wavelets: "nonseparable_wavelet", "riesz_nonseparable_wavelet", - and "riesz_steered_nonseparable_wavelet" - - Filters with names that preceded by "riesz" undergo a Riesz transformation. If the filter name is preceded by - "riesz_steered", a steerable riesz filter is used. - - More than one filter name can be provided. By default, no filters are selected, and image transformation is - skipped. - - .. note:: - There is no IBSI reference standard for Gaussian filters. However, the filter implementation is relatively - straightforward, and most likely reproducible. - - .. warning:: - Riesz transformation and steerable riesz transformations are experimental. The implementation of these - filter transformations is complex. Since there is no corresponding IBSI reference standard, any feature - derived from response maps of Riesz transformations is unlikely to be reproducible. 
- - boundary_condition: {"reflect", "constant", "nearest", "mirror", "wrap"}, optional, default: "mirror" - Sets the boundary condition, which determines how filters behave at the edge of an image. MIRP uses - the same nomenclature for boundary conditions as scipy.ndimage. See the ``mode`` parameter of - `scipy.ndimage.convolve - <https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.convolve.html#scipy.ndimage.convolve>`_ - - separable_wavelet_families: str or list str - Name of separable wavelet kernel as implemented in the ``pywavelets`` package. See `pywt.wavelist( - kind="discrete") <https://pywavelets.readthedocs.io/en/latest/ref/wavelets.html#built-in-wavelets-wavelist>`_ - for options. - - separable_wavelet_set: str or list of str, optional - Filter orientation of separable wavelets. Allows for specifying combinations for high and low-pass filters. - For 2D (``by_slice=True``) filters, the following sets are possible: "hh", "hl", "lh", "ll" (y-x directions). - For 3D (``by_slice=False``) filters, the set of possibilities is larger: "hhh", "hhl", "hlh", "lhh", "hll", - "lhl", "llh", "lll". More than one orientation may be set. Default: "hh" (2d) or "hhh (3d). - - separable_wavelet_stationary: bool, optional, default: True - Determines if wavelets are stationary or not. Stationary wavelets maintain the image dimensions after - decomposition. - - separable_wavelet_decomposition_level: int or list of int, optional, default: 1 - Sets the wavelet decomposition level. For the first decomposition level, the base image is used as input to - generate a response map. For decomposition levels greater than 1, the low-pass image from the previous level - is used as input. More than 1 value may be specified in a list. - - separable_wavelet_rotation_invariance: bool, optional, default: True - Determines whether separable filters are applied in a pseudo-rotational invariant manner. This generates - permutations of the filter and, as a consequence, additional response maps. These maps are then merged using - the pooling method (``separable_wavelet_pooling_method``). - - separable_wavelet_pooling_method: {"max", "min", "mean", "sum"}, optional, default: "max" - Response maps are pooled to create a rotationally invariant response map. This sets the method for - pooling. - - * "max": Each voxel of the pooled response map represents the maximum value for that voxel in the underlying - response maps. - * "min": Each voxel of the pooled response map represents the minimum value for that voxel in the underlying - response maps. - * "mean": Each voxel of the pooled response map represents the mean value for that voxel in the underlying - response maps. For band-pass and high-pass filters, this will likely result in values close to 0.0, - and "max" or "min" pooling methods should be used instead. - * "sum": Each voxel of the pooled response map is the sum of intensities for that voxel in the underlying - response maps. Similar to the "mean" pooling method, but without the normalisation. - - separable_wavelet_boundary_condition: str, optional, default: "mirror" - Sets the boundary condition for separable wavelets. This supersedes any value set by the general - ``boundary_condition`` parameter. See the ``boundary_condition`` parameter above for all valid options. - - nonseparable_wavelet_families: {"shannon", "simoncelli"} - Name of non-separable wavelet kernels used for image transformation. Shannon and Simoncelli wavelets are - implemented. 
- - nonseparable_wavelet_decomposition_level: int or list of int, optional, default: 1 - Sets the wavelet decomposition level. Unlike the decomposition level in separable wavelets, decomposition of - non-separable wavelets is purely a filter-based operation. - - nonseparable_wavelet_response: {"modulus", "abs", "magnitude", "angle", "phase", "argument", "real", "imaginary"}, optional, default: "real" - Nonseparable wavelets produce response maps with complex numbers. The complex-valued response map is - converted to a real-valued response map using the specified method. "modulus", "abs", "magnitude" are - synonymous, as are "angle", "phase", and "argument". "real" selects the real component of the complex values, - and "imaginary" selects the imaginary component. - - nonseparable_wavelet_boundary_condition: str, optional, default: "mirror" - Sets the boundary condition for non-separable wavelets. This supersedes any value set by the general - ``boundary_condition`` parameter. See the ``boundary_condition`` parameter above for all valid options. - - gaussian_sigma: float or list of float, optional - Width of the Gaussian filter in physical dimensions (e.g. mm). Multiple values can be specified. - - gaussian_kernel_truncate: float, optional, default: 4.0 - Width, in units of sigma, at which the filter is truncated. - - gaussian_kernel_boundary_condition: str, optional, default: "mirror" - Sets the boundary condition for Gaussian filters. This supersedes any value set by the general - ``boundary_condition`` parameter. See the ``boundary_condition`` parameter above for all valid options. - - laplacian_of_gaussian_sigma: float or list of float, optional - Width of the Gaussian filter in physical dimensions (e.g. mm). Multiple values can be specified. - - laplacian_of_gaussian_kernel_truncate: float, optional, default: 4.0 - Width, in sigma, at which the filter is truncated. - - laplacian_of_gaussian_pooling_method: {"max", "min", "mean", "sum", "none"}, optional, default: "none" - Determines whether and how response maps for filters with different widths (``laplacian_of_gaussian_sigma``) - are pooled. - - * "max": Each voxel of the pooled response map represents the maximum value for that voxel in the underlying - response maps. - * "min": Each voxel of the pooled response map represents the minimum value for that voxel in the underlying - response maps. - * "mean": Each voxel of the pooled response map represents the mean value for that voxel in the underlying - response maps. For band-pass and high-pass filters, this will likely result in values close to 0.0, - and "max" or "min" pooling methods should be used instead. - * "sum": Each voxel of the pooled response map is the sum of intensities for that voxel in the underlying - response maps. Similar to the "mean" pooling method, but without the normalisation. - * "none": Each Laplacian-of-Gaussian response map is treated separately, without pooling. - - laplacian_of_gaussian_boundary_condition: str, optional, default: "mirror" - Sets the boundary condition for Laplacian-of-Gaussian filters. This supersedes any value set by the general - ``boundary_condition`` parameter. See the ``boundary_condition`` parameter above for all valid options. - - laws_kernel: str or list of str, optional - Compute specific Laws kernels these typically are specific combinations of kernels such as L5S5E5, - E5E5E5. The following kernels are available: 'l5', 'e5', 's5', 'w5', 'r5', 'l3', 'e3', 's3'. 
A combination of - two kernels is expected for 2D (``by_slice=True``), whereas a kernel triplet is expected for 3D filters ( - ``by_slice=False``). - - laws_compute_energy: bool, optional, default: True - Determines whether an energy image should be computed, or just the response map. - - laws_delta: int or list of int, optional, default: 7 - Delta for the Chebyshev distance between the center voxel and the neighbourhood boundary, used to calculate energy maps. - - laws_rotation_invariance: bool, optional, default: True - Determines whether separable filters are applied in a pseudo-rotational invariant manner. This generates - permutations of the filter and, as a consequence, additional response maps. These maps are then merged using - the pooling method (``laws_pooling_method``). - - laws_pooling_method: {"max", "min", "mean", "sum"}, optional, default: "max" - Response maps are pooled to create a rotationally invariant response map. This sets the method for - pooling. - - * "max": Each voxel of the pooled response map represents the maximum value for that voxel in the underlying - response maps. - * "min": Each voxel of the pooled response map represents the minimum value for that voxel in the underlying - response maps. - * "mean": Each voxel of the pooled response map represents the mean value for that voxel in the underlying - response maps. For band-pass and high-pass filters, this will likely result in values close to 0.0, - and "max" or "min" pooling methods should be used instead. - * "sum": Each voxel of the pooled response map is the sum of intensities for that voxel in the underlying - response maps. Similar to the "mean" pooling method, but without the normalisation. - - laws_boundary_condition: str, optional, default: "mirror" - Sets the boundary condition for Laws filters. This supersedes any value set by the general - ``boundary_condition`` parameter. See the ``boundary_condition`` parameter above for all valid options. - - gabor_sigma: float or list of float, optional - Width of the Gaussian envelope in physical dimensions (e.g. mm). Multiple values can be specified. - - gabor_lambda: float or list of float, optional - Wavelength of the oscillator component of the Gabor filter, in physical dimensions (e.g. mm). - - gabor_gamma: float or list of float, optional, default: 1.0 - Eccentricity parameter of the Gaussian envelope of the Gabor kernel. Defines width of y-axis relative to - x-axis for 0-angle Gabor kernel. Default: 1.0 - - gabor_theta: float or list of float, optional, default: 0.0 - Initial angle of the Gabor filter in degrees (not radians). Multiple angles can be provided. - - gabor_theta_step: float, optional, default: None - Angle step size in degrees for in-plane rotational invariance. A value of 0.0 or None (default) disables - stepping. - - gabor_response: {"modulus", "abs", "magnitude", "angle", "phase", "argument", "real", "imaginary"}, optional, default: "modulus" - Type of response map created by Gabor filters. Gabor kernels consist of complex numbers, and the response map - will be complex as well. The complex-valued response map is converted to a real-valued response map using the - specified method. - - gabor_rotation_invariance: bool, optional, default: False - Determines whether (2D) Gabor filters are applied in a pseudo-rotational invariant manner. If True, - Gabor filters are applied in each of the orthogonal planes. 
- - gabor_pooling_method: {"max", "min", "mean", "sum"}, optional, default: "max" - Response maps are pooled to create a rotationally invariant response map. This sets the method for - pooling. - - * "max": Each voxel of the pooled response map represents the maximum value for that voxel in the underlying - response maps. - * "min": Each voxel of the pooled response map represents the minimum value for that voxel in the underlying - response maps. - * "mean": Each voxel of the pooled response map represents the mean value for that voxel in the underlying - response maps. For band-pass and high-pass filters, this will likely result in values close to 0.0, - and "max" or "min" pooling methods should be used instead. - * "sum": Each voxel of the pooled response map is the sum of intensities for that voxel in the underlying - response maps. Similar to the "mean" pooling method, but without the normalisation. - - gabor_boundary_condition: str, optional, default: "mirror" - Sets the boundary condition for Gabor filters. This supersedes any value set by the general - ``boundary_condition`` parameter. See the ``boundary_condition`` parameter above for all valid options. - - mean_filter_kernel_size: int or list of int, optional - Length of the kernel in pixels. Multiple values can be specified to create multiple response maps. - - mean_filter_boundary_condition: str, optional, default: "mirror" - Sets the boundary condition for mean filters. This supersedes any value set by the general - ``boundary_condition`` parameter. See the ``boundary_condition`` parameter above for all valid options. - - riesz_filter_order: float, list of float or list of list of float, optional - Riesz-transformation order. If required, should be a 2 (2D filter), or 3-element (3D filter) integer - vector, e.g. [0,0,1]. Multiple sets can be provided by nesting the list, e.g. [[0, 0, 1], - [0, 1, 0]]. If an integer is provided, a set of filters is created. For example when - riesz_filter_order = 2 and a 2D filter is used, the following Riesz-transformations are performed: [2, - 0], [1, 1] and [0, 2]. - - .. note:: - Riesz filter order uses the numpy coordinate ordering and represents (z, y, x) directions. - - riesz_filter_tensor_sigma: float or list of float, optional - Determines width of Gaussian filter used with Riesz filter banks. - - **kwargs: dict, optional - Unused keyword arguments. 
- - """ - - def __init__( - self, - by_slice: bool, - response_map_feature_settings: FeatureExtractionSettingsClass | None = None, - response_map_feature_families: Union[None, str, List[str]] = "statistical", - response_map_discretisation_method: Union[None, str, List[str]] = "fixed_bin_number", - response_map_discretisation_n_bins: Union[None, int, List[int]] = 16, - response_map_discretisation_bin_width: Union[None, int, List[int]] = None, - filter_kernels: Union[None, str, List[str]] = None, - boundary_condition: Union[None, str] = "mirror", - separable_wavelet_families: Union[None, str, List[str]] = None, - separable_wavelet_set: Union[None, str, List[str]] = None, - separable_wavelet_stationary: bool = True, - separable_wavelet_decomposition_level: Union[None, int, List[int]] = 1, - separable_wavelet_rotation_invariance: bool = True, - separable_wavelet_pooling_method: str = "max", - separable_wavelet_boundary_condition: Union[None, str] = None, - nonseparable_wavelet_families: Union[None, str, List[str]] = None, - nonseparable_wavelet_decomposition_level: Union[None, int, List[int]] = 1, - nonseparable_wavelet_response: Union[None, str] = "real", - nonseparable_wavelet_boundary_condition: Union[None, str] = None, - gaussian_sigma: Union[None, float, List[float]] = None, - gaussian_kernel_truncate: Union[None, float] = 4.0, - gaussian_kernel_boundary_condition: Union[None, str] = None, - laplacian_of_gaussian_sigma: Union[None, float, List[float]] = None, - laplacian_of_gaussian_kernel_truncate: Union[None, float] = 4.0, - laplacian_of_gaussian_pooling_method: str = "none", - laplacian_of_gaussian_boundary_condition: Union[None, str] = None, - laws_kernel: Union[None, str, List[str]] = None, - laws_delta: Union[int, List[int]] = 7, - laws_compute_energy: bool = True, - laws_rotation_invariance: bool = True, - laws_pooling_method: str = "max", - laws_boundary_condition: Union[None, str] = None, - gabor_sigma: Union[None, float, List[float]] = None, - gabor_lambda: Union[None, float, List[float]] = None, - gabor_gamma: Union[None, float, List[float]] = 1.0, - gabor_theta: Union[None, float, List[float]] = 0.0, - gabor_theta_step: Union[None, float] = None, - gabor_response: str = "modulus", - gabor_rotation_invariance: bool = False, - gabor_pooling_method: str = "max", - gabor_boundary_condition: Union[None, str] = None, - mean_filter_kernel_size: Union[None, int, List[int]] = None, - mean_filter_boundary_condition: Union[None, str] = None, - riesz_filter_order: Union[None, int, List[int]] = None, - riesz_filter_tensor_sigma: Union[None, float, List[float]] = None, - **kwargs - ): - # Set by slice - self.by_slice: bool = by_slice - - # Check filter kernels - if not isinstance(filter_kernels, list): - filter_kernels = [filter_kernels] - - if any(filter_kernel is None for filter_kernel in filter_kernels): - filter_kernels = None - - if filter_kernels is not None: - # Check validity of the filter kernel names. - valid_kernels: List[bool] = [ii in self.get_available_image_filters() for ii in filter_kernels] - - if not all(valid_kernels): - raise ValueError( - f"One or more kernels are not implemented, or were spelled incorrectly: " - f"{', '.join([filter_kernel for ii, filter_kernel in enumerate(filter_kernels) if not valid_kernels[ii]])}") - - self.spatial_filters: Union[None, List[str]] = filter_kernels - - # Check families. 
- if response_map_feature_families is None: - response_map_feature_families = "none" - - if not isinstance(response_map_feature_families, list): - response_map_feature_families = [response_map_feature_families] - - # Check which entries are valid. - valid_families: List[bool] = [ii in [ - "li", "loc.int", "loc_int", "local_int", "local_intensity", "st", "stat", "stats", "statistics", - "statistical", "ih", "int_hist", "int_histogram", "intensity_histogram", - "ivh", "int_vol_hist", "intensity_volume_histogram", "cm", "glcm", "grey_level_cooccurrence_matrix", - "cooccurrence_matrix", "rlm", "glrlm", "grey_level_run_length_matrix", "run_length_matrix", - "szm", "glszm", "grey_level_size_zone_matrix", "size_zone_matrix", "dzm", "gldzm", - "grey_level_distance_zone_matrix", "distance_zone_matrix", "tdm", "ngtdm", - "neighbourhood_grey_tone_difference_matrix", "grey_tone_difference_matrix", "ldm", "ngldm", - "neighbouring_grey_level_dependence_matrix", "grey_level_dependence_matrix", "all", "none" - ] for ii in response_map_feature_families] - - if not all(valid_families): - raise ValueError( - f"One or more families in the base_feature_families parameter were not recognised: " - f"{', '.join([response_map_feature_families[ii] for ii, is_valid in enumerate(valid_families) if not is_valid])}" - ) - - # Create a temporary feature settings object. If response_map_feature_settings is not present, this object is - # used. Otherwise, response_map_feature_settings is copied, and then updated. - if response_map_feature_settings is None: - - kwargs = copy.deepcopy(kwargs) - kwargs.update({ - "base_feature_families": response_map_feature_families, - "base_discretisation_method": response_map_discretisation_method, - "base_discretisation_bin_width": response_map_discretisation_bin_width, - "base_discretisation_n_bins": response_map_discretisation_n_bins - }) - - response_map_feature_settings = FeatureExtractionSettingsClass( - by_slice=by_slice, - no_approximation=False, - **kwargs - ) - - # Set feature settings. - self.feature_settings: FeatureExtractionSettingsClass = response_map_feature_settings - - # Check boundary condition. - self.boundary_condition = boundary_condition - self.boundary_condition: str = self.check_boundary_condition( - boundary_condition, - "boundary_condition") - - # Check mean filter settings - if self.has_mean_filter(): - # Check filter size. - if not isinstance(mean_filter_kernel_size, list): - mean_filter_kernel_size = [mean_filter_kernel_size] - - if not all(isinstance(kernel_size, int) for kernel_size in mean_filter_kernel_size): - raise TypeError( - f"All kernel sizes for the mean filter are expected to be integer values equal or " - f"greater than 1. Found: one or more kernel sizes that were not integers.") - - if not all(kernel_size >= 1 for kernel_size in mean_filter_kernel_size): - raise ValueError( - f"All kernel sizes for the mean filter are expected to be integer values equal or " - f"greater than 1. Found: one or more kernel sizes less then 1.") - - # Check boundary condition - mean_filter_boundary_condition = self.check_boundary_condition( - mean_filter_boundary_condition, - "mean_filter_boundary_condition") - - else: - mean_filter_kernel_size = None - mean_filter_boundary_condition = None - - self.mean_filter_size: Union[None, List[int]] = mean_filter_kernel_size - self.mean_filter_boundary_condition: Union[None, str] = mean_filter_boundary_condition - - # Check Gaussian kernel settings. - if self.has_gaussian_filter(): - # Check sigma. 
- gaussian_sigma = self.check_sigma( - gaussian_sigma, - "gaussian_sigma") - - # Check filter truncation. - gaussian_kernel_truncate = self.check_truncation( - gaussian_kernel_truncate, - "gaussian_kernel_truncate") - - # Check boundary condition - gaussian_kernel_boundary_condition = self.check_boundary_condition( - gaussian_kernel_boundary_condition, - "gaussian_kernel_boundary_condition") - - else: - gaussian_sigma = None - gaussian_kernel_truncate = None - gaussian_kernel_boundary_condition = None - - self.gaussian_sigma: Union[None, List[float]] = gaussian_sigma - self.gaussian_sigma_truncate: Union[None, float] = gaussian_kernel_truncate - self.gaussian_boundary_condition: Union[None, str] = gaussian_kernel_boundary_condition - - # Check laplacian-of-gaussian filter settings - if self.has_laplacian_of_gaussian_filter(): - # Check sigma. - laplacian_of_gaussian_sigma = self.check_sigma( - laplacian_of_gaussian_sigma, - "laplacian_of_gaussian_sigma") - - # Check filter truncation. - laplacian_of_gaussian_kernel_truncate = self.check_truncation( - laplacian_of_gaussian_kernel_truncate, - "laplacian_of_gaussian_kernel_truncate") - - # Check pooling method. - laplacian_of_gaussian_pooling_method = self.check_pooling_method( - laplacian_of_gaussian_pooling_method, - "laplacian_of_gaussian_pooling_method", - allow_none=True) - - # Check boundary condition. - laplacian_of_gaussian_boundary_condition = self.check_boundary_condition( - laplacian_of_gaussian_boundary_condition, "laplacian_of_gaussian_boundary_condition") - - else: - laplacian_of_gaussian_sigma = None - laplacian_of_gaussian_kernel_truncate = None - laplacian_of_gaussian_pooling_method = None - laplacian_of_gaussian_boundary_condition = None - - self.log_sigma: Union[None, List[float]] = laplacian_of_gaussian_sigma - self.log_sigma_truncate: Union[None, float] = laplacian_of_gaussian_kernel_truncate - self.log_pooling_method: Union[None, str] = laplacian_of_gaussian_pooling_method - self.log_boundary_condition: Union[None, str] = laplacian_of_gaussian_boundary_condition - - # Check Laws kernel filter settings - if self.has_laws_filter(): - # Check kernel. - laws_kernel = self.check_laws_kernels(laws_kernel, - "laws_kernel") - - # Check energy computation. - if not isinstance(laws_compute_energy, bool): - raise TypeError("The laws_compute_energy parameter is expected to be a boolean value.") - - if laws_compute_energy: - - # Check delta. - if not isinstance(laws_delta, list): - laws_delta = [laws_delta] - - if not all(isinstance(delta, int) for delta in laws_delta): - raise TypeError( - "The laws_delta parameter is expected to be one or more integers with value 0 or " - "greater. Found: one or more values that are not integer.") - - if not all(delta >= 0 for delta in laws_delta): - raise ValueError( - "The laws_delta parameter is expected to be one or more integers with value 0 or " - "greater. Found: one or more values that are less than 0.") - - else: - laws_delta = None - - # Check invariance. - if not isinstance(laws_rotation_invariance, bool): - raise TypeError("The laws_rotation_invariance parameter is expected to be a boolean value.") - - # Check pooling method. 
- laws_pooling_method = self.check_pooling_method(laws_pooling_method, "laws_pooling_method") - - # Check boundary condition - laws_boundary_condition = self.check_boundary_condition(laws_boundary_condition, "laws_boundary_condition") - - else: - laws_kernel = None - laws_compute_energy = None, - laws_delta = None - laws_rotation_invariance = None - laws_pooling_method = None - laws_boundary_condition = None - - self.laws_calculate_energy: Union[None, bool] = laws_compute_energy - self.laws_kernel: Union[None, List[str]] = laws_kernel - self.laws_delta: Union[None, bool] = laws_delta - self.laws_rotation_invariance: Union[None, bool] = laws_rotation_invariance - self.laws_pooling_method: Union[None, str] = laws_pooling_method - self.laws_boundary_condition: Union[None, str] = laws_boundary_condition - - # Check Gabor filter settings. - if self.has_gabor_filter(): - # Check sigma. - gabor_sigma = self.check_sigma(gabor_sigma, "gabor_sigma") - - # Check gamma. Gamma behaves like sigma. - gabor_gamma = self.check_sigma(gabor_gamma, "gabor_gamma") - - # Check lambda. Lambda behaves like sigma - gabor_lambda = self.check_sigma(gabor_lambda, "gabor_lambda") - - # Check theta step. - if gabor_theta_step is not None: - if not isinstance(gabor_theta_step, (float, int)): - raise TypeError( - "The gabor_theta_step parameter is expected to be an angle, in degrees. Found a " - "value that was not a number.") - - if gabor_theta_step == 0.0: - gabor_theta_step = None - - if gabor_theta_step is not None: - # Check that the step would divide the 360-degree circle into an integer number of steps. - if not (360.0 / gabor_theta_step).is_integer(): - raise ValueError( - f"The gabor_theta_step parameter should divide a circle into equal portions. " - f"The current settings would create {360.0 / gabor_theta_step} portions.") - - # Check theta. - gabor_pool_theta = gabor_theta_step is not None - - if not isinstance(gabor_theta, list): - gabor_theta = [gabor_theta] - - if gabor_theta_step is not None and len(gabor_theta) > 1: - raise ValueError( - f"The gabor_theta parameter cannot have more than one value when used in conjunction" - f" with the gabor_theta_step parameter") - - if not all(isinstance(theta, (float, int)) for theta in gabor_theta): - raise TypeError( - f"The gabor_theta parameter is expected to be one or more values indicating angles in" - f" degrees. Found: one or more values that were not numeric.") - - if gabor_theta_step is not None: - gabor_theta = [gabor_theta[0] + ii for ii in np.arange(0.0, 360.0, gabor_theta_step)] - - # Check filter response. 
- gabor_response = self.check_response(gabor_response, "gabor_response") - - # Check rotation invariance - if not isinstance(gabor_rotation_invariance, bool): - raise TypeError("The gabor_rotation_invariance parameter is expected to be a boolean value.") - - # Check pooling method - gabor_pooling_method = self.check_pooling_method(gabor_pooling_method, "gabor_pooling_method") - - # Check boundary condition - gabor_boundary_condition = self.check_boundary_condition( - gabor_boundary_condition, "gabor_boundary_condition") - - else: - gabor_sigma = None - gabor_gamma = None - gabor_lambda = None - gabor_theta = None - gabor_pool_theta = None - gabor_response = None - gabor_rotation_invariance = None - gabor_pooling_method = None - gabor_boundary_condition = None - - self.gabor_sigma: Union[None, List[float]] = gabor_sigma - self.gabor_gamma: Union[None, List[float]] = gabor_gamma - self.gabor_lambda: Union[None, List[float]] = gabor_lambda - self.gabor_theta: Union[None, List[float], List[int]] = gabor_theta - self.gabor_pool_theta: Union[None, bool] = gabor_pool_theta - self.gabor_response: Union[None, str] = gabor_response - self.gabor_rotation_invariance: Union[None, str] = gabor_rotation_invariance - self.gabor_pooling_method: Union[None, str] = gabor_pooling_method - self.gabor_boundary_condition: Union[None, str] = gabor_boundary_condition - - # Check separable wavelet settings. - if self.has_separable_wavelet_filter(): - # Check wavelet families. - separable_wavelet_families = self.check_separable_wavelet_families( - separable_wavelet_families, "separable_wavelet_families") - - # Check wavelet filter sets. - separable_wavelet_set = self.check_separable_wavelet_sets(separable_wavelet_set, "separable_wavelet_set") - - # Check if wavelet is stationary - if not isinstance(separable_wavelet_stationary, bool): - raise TypeError(f"The separable_wavelet_stationary parameter is expected to be a boolean value.") - - # Check decomposition level - separable_wavelet_decomposition_level = self.check_decomposition_level( - separable_wavelet_decomposition_level, "separable_wavelet_decomposition_level") - - # Check rotation invariance - if not isinstance(separable_wavelet_rotation_invariance, bool): - raise TypeError("The separable_wavelet_rotation_invariance parameter is expected to be a boolean value.") - - # Check pooling method. - separable_wavelet_pooling_method = self.check_pooling_method( - separable_wavelet_pooling_method, "separable_wavelet_pooling_method") - - # Check boundary condition. 
- separable_wavelet_boundary_condition = self.check_boundary_condition( - separable_wavelet_boundary_condition, "separable_wavelet_boundary_condition") - - else: - separable_wavelet_families = None - separable_wavelet_set = None - separable_wavelet_stationary = None - separable_wavelet_decomposition_level = None - separable_wavelet_rotation_invariance = None - separable_wavelet_pooling_method = None - separable_wavelet_boundary_condition = None - - self.separable_wavelet_families: Union[None, List[str]] = separable_wavelet_families - self.separable_wavelet_filter_set: Union[None, List[str]] = separable_wavelet_set - self.separable_wavelet_stationary: Union[None, bool] = separable_wavelet_stationary - self.separable_wavelet_decomposition_level: Union[None, List[int]] = separable_wavelet_decomposition_level - self.separable_wavelet_rotation_invariance: Union[None, bool] = separable_wavelet_rotation_invariance - self.separable_wavelet_pooling_method: Union[None, str] = separable_wavelet_pooling_method - self.separable_wavelet_boundary_condition: Union[None, str] = separable_wavelet_boundary_condition - - # Set parameters for non-separable wavelets. - if self.has_nonseparable_wavelet_filter(): - # Check wavelet families. - nonseparable_wavelet_families = self.check_nonseparable_wavelet_families( - nonseparable_wavelet_families, "nonseparable_wavelet_families") - - # Check decomposition level. - nonseparable_wavelet_decomposition_level = self.check_decomposition_level( - nonseparable_wavelet_decomposition_level, "nonseparable_wavelet_decomposition_level") - - # Check filter response. - nonseparable_wavelet_response = self.check_response( - nonseparable_wavelet_response, "nonseparable_wavelet_response") - - # Check boundary condition. - nonseparable_wavelet_boundary_condition = self.check_boundary_condition( - nonseparable_wavelet_boundary_condition, "nonseparable_wavelet_boundary_condition") - - else: - nonseparable_wavelet_families = None - nonseparable_wavelet_decomposition_level = None - nonseparable_wavelet_response = None - nonseparable_wavelet_boundary_condition = None - - self.nonseparable_wavelet_families: Union[None, List[str]] = nonseparable_wavelet_families - self.nonseparable_wavelet_decomposition_level: Union[None, List[int]] = nonseparable_wavelet_decomposition_level - self.nonseparable_wavelet_response: Union[None, str] = nonseparable_wavelet_response - self.nonseparable_wavelet_boundary_condition: Union[None, str] = nonseparable_wavelet_boundary_condition - - # Check Riesz filter orders. 
- if self.has_riesz_filter(): - riesz_filter_order = self.check_riesz_filter_order(riesz_filter_order, "riesz_filter_order") - - else: - riesz_filter_order = None - - if self.has_steered_riesz_filter(): - riesz_filter_tensor_sigma = self.check_sigma(riesz_filter_tensor_sigma, "riesz_filter_tensor_sigma") - - else: - riesz_filter_tensor_sigma = None - - self.riesz_order: Union[None, List[List[int]]] = riesz_filter_order - self.riesz_filter_tensor_sigma: Union[None, List[float]] = riesz_filter_tensor_sigma - - @staticmethod - def get_available_image_filters(): - return [ - "separable_wavelet", "nonseparable_wavelet", "riesz_nonseparable_wavelet", - "riesz_steered_nonseparable_wavelet", "gaussian", "riesz_gaussian", "riesz_steered_gaussian", - "laplacian_of_gaussian", "log", "riesz_laplacian_of_gaussian", "riesz_steered_laplacian_of_gaussian", - "riesz_log", "riesz_steered_log", "laws", "gabor", "riesz_gabor", "riesz_steered_gabor", "mean" - ] - - def check_boundary_condition(self, x, var_name): - if x is None: - if self.boundary_condition is not None: - # Avoid updating by reference. - x = copy.deepcopy(self.boundary_condition) - - else: - raise ValueError(f"No value for the {var_name} parameter could be set, due to a lack of a default.") - - # Check value - if x not in ["reflect", "constant", "nearest", "mirror", "wrap"]: - raise ValueError( - f"The provided value for the {var_name} is not valid. One of 'reflect', 'constant', " - f"'nearest', 'mirror' or 'wrap' was expected. Found: {x}") - - return x - - @staticmethod - def check_pooling_method(x, var_name, allow_none=False): - - valid_pooling_method = ["max", "min", "mean", "sum"] - if allow_none: - valid_pooling_method += ["none"] - - if x not in valid_pooling_method: - raise ValueError( - f"The {var_name} parameter expects one of the following values: " - f"{', '.join(valid_pooling_method)}. Found: {x}") - - return x - - @staticmethod - def check_sigma(x, var_name): - # Check sigma is a list. - if not isinstance(x, list): - x = [x] - - # Check that the sigma values are floating points. - if not all(isinstance(sigma, float) for sigma in x): - raise TypeError( - f"The {var_name} parameter is expected to consists of floating points with values " - f"greater than 0.0. Found: one or more values that were not floating points.") - - if not all(sigma > 0.0 for sigma in x): - raise ValueError( - f"The {var_name} parameter is expected to consists of floating points with values " - f"greater than 0.0. Found: one or more values with value 0.0 or less.") - - return x - - @staticmethod - def check_truncation(x, var_name): - - # Check that the truncation values are floating points. - if not isinstance(x, float): - raise TypeError( - f"The {var_name} parameter is expected to be a floating point with value " - f"greater than 0.0. Found: a value that was not a floating point.") - - if not x > 0.0: - raise ValueError( - f"The {var_name} parameter is expected to be a floating point with value " - f"greater than 0.0. Found: a value of 0.0 or less.") - - return x - - @staticmethod - def check_response(x, var_name): - - valid_response = ["modulus", "abs", "magnitude", "angle", "phase", "argument", "real", "imaginary"] - - # Check that response is correct. - if x not in valid_response: - raise ValueError( - f"The {var_name} parameter is not correct. Expected one of {', '.join(valid_response)}. " - f"Found: {x}") - - return x - - @staticmethod - def check_separable_wavelet_families(x, var_name): - # Import pywavelets. 
- import pywt - - if not isinstance(x, list): - x = [x] - - available_kernels = pywt.wavelist(kind="discrete") - valid_kernel = [kernel.lower() in available_kernels for kernel in x] - - if not all(valid_kernel): - raise ValueError( - f"The {var_name} parameter requires wavelet families that match those defined in the " - f"pywavelets package. Could not match: " - f"{', '.join([kernel for ii, kernel in x if not valid_kernel[ii]])}") - - # Return lowercase values. - return [xx.lower() for xx in x] - - @staticmethod - def check_nonseparable_wavelet_families(x, var_name): - if not isinstance(x, list): - x = [x] - - available_kernels = ["simoncelli", "shannon"] - valid_kernel = [kernel.lower() in available_kernels for kernel in x] - - if not all(valid_kernel): - raise ValueError( - f"The {var_name} parameter expects one or more of the following values: " - f"{', '.join(available_kernels)}. Could not match: " - f"{', '.join([kernel for ii, kernel in x if not valid_kernel[ii]])}") - - # Return lowercase values. - return [xx.lower() for xx in x] - - @staticmethod - def check_decomposition_level(x, var_name): - if not isinstance(x, list): - x = [x] - - if not all(isinstance(xx, int) for xx in x): - raise TypeError( - f"The {var_name} parameter should be one or more integer " - f"values of at least 1. Found: one or more values that was not an integer.") - - if not all(xx >= 1 for xx in x): - raise ValueError( - f"The {var_name} parameter should be one or more integer " - f"values of at least 1. Found: one or more values that was not an integer.") - - return x - - def check_separable_wavelet_sets(self, x: Union[None, str, List[str]], var_name): - from itertools import product - - if x is None: - if self.by_slice: - x = "hh" - else: - x = "hhh" - - # Check if x is a list. - if not isinstance(x, list): - x = [x] - - # Generate all potential combinations. - if self.by_slice: - possible_combinations = ["".join(combination) for combination in product(["l", "h"], repeat=2)] - - else: - possible_combinations = ["".join(combination) for combination in product(["l", "h"], repeat=3)] - - # Check for all. - if any(kernel == "all" for kernel in x): - x = possible_combinations - - # Check which kernels are valid. - valid_kernel = [kernel.lower() in possible_combinations for kernel in x] - - if not all(valid_kernel): - raise ValueError( - f"The {var_name} parameter requires combinations of low (l) and high-pass (h) kernels. " - f"Two kernels should be specified for 2D, and three for 3D. Found the following invalid " - f"combinations: " - f"{', '.join([kernel for ii, kernel in enumerate(x) if not valid_kernel[ii]])}") - - # Return lowercase values. - return [xx.lower() for xx in x] - - def check_laws_kernels(self, x: Union[str, List[str]], var_name): - from itertools import product - - # Set implemented kernels. - kernels = ['l5', 'e5', 's5', 'w5', 'r5', 'l3', 'e3', 's3'] - - # Generate all valid combinations. - if self.by_slice: - possible_combinations = ["".join(combination) for combination in product(kernels, repeat=2)] - - else: - possible_combinations = ["".join(combination) for combination in product(kernels, repeat=3)] - - # Create list. - if not isinstance(x, list): - x = [x] - - # Check which kernels are valid. - valid_kernel = [kernel.lower() in possible_combinations for kernel in x] - - if not all(valid_kernel): - raise ValueError( - f"The {var_name} parameter requires combinations of Laws kernels. The follow kernels are " - f"implemented: {', '.join(kernels)}. 
Two kernels should be specified for 2D, " - f"and three for 3D. Found the following illegal combinations: " - f"{', '.join([kernel for ii, kernel in enumerate(x) if not valid_kernel[ii]])}") - - # Return lowercase values. - return [xx.lower() for xx in x] - - def check_riesz_filter_order(self, x, var_name): - from itertools import product - - # Skip if None - if x is None: - return x - - # Set number of elements that the filter order should have - if self.by_slice: - n_elements = 2 - - else: - n_elements = 3 - - # Create filterbank. - if isinstance(x, int): - # Check that x is not negative. - if x < 0: - raise ValueError(f"The {var_name} parameter cannot be negative.") - - # Set filter order. - single_filter_order = list(range(x+1)) - - # Generate all valid combinations. - x = [list(combination) for combination in product(single_filter_order, repeat=n_elements) if - sum(combination) == x] - - if not isinstance(x, list): - raise TypeError(f"The {var_name} parameter is expected to be a list") - - # Create a nested list, - if not all(isinstance(xx, list) for xx in x): - x = [x] - - # Check that all elements of x have the right length, and do not negative orders. - if not all(len(xx) == n_elements for xx in x): - raise ValueError( - f"The {var_name} parameter is expected to contain filter orders, each consisting of " - f"{n_elements} non-negative integer values. One or more filter orders did not have the " - f"expected number of elements.") - - if not all(all(isinstance(xxx, int) for xxx in xx) for xx in x): - raise ValueError( - f"The {var_name} parameter is expected to contain filter orders, each consisting of " - f"{n_elements} non-negative integer values. One or more filter orders did not fully " - f"consist of integer values.") - - if not all(all(xxx >= 0 for xxx in xx) for xx in x): - raise ValueError( - f"The {var_name} parameter is expected to contain filter orders, each consisting of " - f"{n_elements} non-negative integer values. 
One or more filter orders contained negative values.") - - return x - - def has_mean_filter(self, x=None): - if x is None: - x = self.spatial_filters - elif not isinstance(x, list): - x = [x] - - return x is not None and any(filter_kernel in ["mean"] for filter_kernel in x) - - def has_gaussian_filter(self, x=None): - if x is None: - x = self.spatial_filters - elif not isinstance(x, list): - x = [x] - - return x is not None and any( - filter_kernel in ["gaussian", "riesz_gaussian", "riesz_steered_gaussian"] for filter_kernel in x) - - def has_laplacian_of_gaussian_filter(self, x=None): - if x is None: - x = self.spatial_filters - elif not isinstance(x, list): - x = [x] - - return x is not None and any( - filter_kernel in [ - "laplacian_of_gaussian", "log", "riesz_laplacian_of_gaussian", "riesz_log", - "riesz_steered_laplacian_of_gaussian", "riesz_steered_log" - ] for filter_kernel in x) - - def has_laws_filter(self, x=None): - if x is None: - x = self.spatial_filters - elif not isinstance(x, list): - x = [x] - - return x is not None and any(filter_kernel in ["laws"] for filter_kernel in x) - - def has_gabor_filter(self, x=None): - if x is None: - x = self.spatial_filters - elif not isinstance(x, list): - x = [x] - - return x is not None and any( - filter_kernel in ["gabor", "riesz_gabor", "riesz_steered_gabor"] for filter_kernel in x) - - def has_separable_wavelet_filter(self, x=None): - if x is None: - x = self.spatial_filters - elif not isinstance(x, list): - x = [x] - - return x is not None and any(filter_kernel in ["separable_wavelet"] for filter_kernel in x) - - def has_nonseparable_wavelet_filter(self, x=None): - if x is None: - x = self.spatial_filters - elif not isinstance(x, list): - x = [x] - - return x is not None and any( - filter_kernel in [ - "nonseparable_wavelet", "riesz_nonseparable_wavelet", "riesz_steered_nonseparable_wavelet" - ] for filter_kernel in x) - - def has_riesz_filter(self, x=None): - if x is None: - x = self.spatial_filters - elif not isinstance(x, list): - x = [x] - - return x is not None and any(filter_kernel.startswith("riesz") for filter_kernel in x) - - def has_steered_riesz_filter(self, x=None): - if x is None: - x = self.spatial_filters - elif not isinstance(x, list): - x = [x] - - return x is not None and any(filter_kernel.startswith("riesz_steered") for filter_kernel in x)
- - - -def get_image_transformation_settings() -> list[dict[str, Any]]: - return [ - setting_def( - "response_map_feature_families", "str", to_list=True, xml_key="feature_families", - class_key="families", test=["statistical", "glcm"] - ), - setting_def( - "response_map_discretisation_method", "str", to_list=True, xml_key="discretisation_method", - class_key="discretisation_method", test=["fixed_bin_size", "fixed_bin_number"] - ), - setting_def( - "response_map_discretisation_n_bins", "int", to_list=True, xml_key="discretisation_n_bins", - class_key="discretisation_n_bins", test=[10, 33] - ), - setting_def( - "response_map_discretisation_bin_width", "float", to_list=True, xml_key="discretisation_bin_width", - class_key="discretisation_bin_width", test=[10.0, 34.0] - ), - setting_def( - "filter_kernels", "float", to_list=True, xml_key=["filter_kernels", "spatial_filters"], - class_key="spatial_filters", test=[ - "separable_wavelet", "nonseparable_wavelet", "riesz_nonseparable_wavelet", - "riesz_steered_nonseparable_wavelet", "gaussian", "riesz_gaussian", "riesz_steered_gaussian", - "laplacian_of_gaussian", "log", "riesz_laplacian_of_gaussian", "riesz_steered_laplacian_of_gaussian", - "riesz_log", "riesz_steered_log", "laws", "gabor", "riesz_gabor", "riesz_steered_gabor", "mean" - ] - ), - setting_def("boundary_condition", "str", test="nearest"), - setting_def("separable_wavelet_families", "str", to_list=True, test=["coif4", "coif5"]), - setting_def( - "separable_wavelet_set", "str", to_list=True, class_key="separable_wavelet_filter_set", - test=["hhh", "lll"] - ), - setting_def("separable_wavelet_stationary", "bool", test=False), - setting_def("separable_wavelet_decomposition_level", "int", to_list=True, test=[1, 2]), - setting_def("separable_wavelet_rotation_invariance", "bool", test=False), - setting_def("separable_wavelet_pooling_method", "str", test="mean"), - setting_def("separable_wavelet_boundary_condition", "str", test="constant"), - setting_def("nonseparable_wavelet_families", "str", to_list=True, test=["simoncelli", "shannon"]), - setting_def("nonseparable_wavelet_decomposition_level", "int", to_list=True, test=[1, 2]), - setting_def("nonseparable_wavelet_response", "str", test="magnitude"), - setting_def("nonseparable_wavelet_boundary_condition", "str", test="constant"), - setting_def("gaussian_sigma", "float", to_list=True, test=[1.0, 3.0]), - setting_def("gaussian_kernel_truncate", "float", class_key="gaussian_sigma_truncate", test=10.0), - setting_def( - "gaussian_kernel_boundary_condition", "str", class_key="gaussian_boundary_condition", test="constant" - ), - setting_def( - "laplacian_of_gaussian_sigma", "float", to_list=True, - xml_key=["laplacian_of_gaussian_sigma", "log_sigma"], class_key="log_sigma", test=[1.0, 3.0] - ), - setting_def( - "laplacian_of_gaussian_kernel_truncate", "float", - xml_key=["laplacian_of_gaussian_kernel_truncate", "log_sigma_truncate"], class_key="log_sigma_truncate", - test=10.0 - ), - setting_def("laplacian_of_gaussian_pooling_method", "str", class_key="log_pooling_method", test="mean"), - setting_def( - "laplacian_of_gaussian_boundary_condition", "str", class_key="log_boundary_condition", test="constant" - ), - setting_def("laws_kernel", "str", to_list=True, test=["l5e5s5", "w5r5l3"]), - setting_def( - "laws_compute_energy", "bool", xml_key="laws_calculate_energy", - class_key="laws_calculate_energy", test=True - ), - setting_def("laws_delta", "int", to_list=True, test=[3, 5]), - setting_def( - "laws_rotation_invariance", "bool", 
xml_key=["laws_rotation_invariance", "laws_rot_invar"], test=False - ), - setting_def("laws_pooling_method", "str", test="mean"), - setting_def("laws_boundary_condition", "str", test="constant"), - setting_def("gabor_sigma", "float", to_list=True, test=[1.0, 3.0]), - setting_def("gabor_lambda", "float", to_list=True, test=[0.5, 2.0]), - setting_def("gabor_gamma", "float", to_list=True, test=[0.5, 0.75]), - setting_def("gabor_theta", "float", to_list=True, test=[5.0, 15.0]), - setting_def("gabor_theta_step", "float", test=None), - setting_def("gabor_response", "str", test="magnitude"), - setting_def( - "gabor_rotation_invariance", "bool", xml_key=["gabor_rotation_invariance", "gabor_rot_invar"], test=False - ), - setting_def("gabor_pooling_method", "str", test="mean"), - setting_def("gabor_boundary_condition", "str", test="constant"), - setting_def( - "mean_filter_kernel_size", "int", to_list=True, xml_key=["mean_filter_kernel_size", "mean_filter_size"], - class_key="mean_filter_size", test=[3, 7] - ), - setting_def("mean_filter_boundary_condition", "str", test="constant"), - setting_def( - "riesz_filter_order", "int", to_list=True, xml_key=["riesz_filter_order", "riesz_order"], - class_key="riesz_order", test=[2, 1, 0] - ), - setting_def("riesz_filter_tensor_sigma", "float", to_list=True, test=[3.0, 5.0]) - ] -
- - - - \ No newline at end of file diff --git a/docs/_modules/mirp/settings/settingsInterpolation.html b/docs/_modules/mirp/settings/settingsInterpolation.html deleted file mode 100644 index 83fabf2c..00000000 --- a/docs/_modules/mirp/settings/settingsInterpolation.html +++ /dev/null @@ -1,322 +0,0 @@ - - - - - - mirp.settings.settingsInterpolation — mirp 2.1.0 documentation - - - - - - - - - - - - - - - -

Source code for mirp.settings.settingsInterpolation

-from typing import Iterable, Any
-from dataclasses import dataclass
-from mirp.settings.utilities import setting_def
-
-
-
-[docs] -@dataclass -class ImageInterpolationSettingsClass: - """ - Parameters related to image interpolating / resampling. Images in a dataset are typically resampled to uniform - voxel spacing to ensure that their spatial representation does not vary between samples. Note that when the - voxel spacing in the original image is smaller than that in the resampled image (e.g., 0.5 mm sampled to 1.0 mm), - antialiasing may be recommended. - - For parameters related to mask interpolation / resampling, see - :class:`~mirp.settings.settingsInterpolation.MaskInterpolationSettingsClass`. - - Parameters - ---------- - by_slice: str or bool, optional, default: False - Defines whether calculations should be performed in 2D (True) or 3D (False), or alternatively only in the - largest slice ("largest"). See :class:`~mirp.settings.settingsGeneral.GeneralSettingsClass`. - - new_spacing: float or list of float or list of list of float, optional: - Sets voxel spacing after interpolation. A single value represents the spacing that will be applied in all - directions. Non-uniform voxel spacing may also be provided, but requires 3 values for z, y, and x directions - (if `by_slice = False`) or 2 values for y and x directions (otherwise). - - Multiple spacings may be defined by creating a nested list, e.g. [[1.0], [1.5], [2.0]] to resample the - same image multiple times to different (here: isotropic) voxel spacings, namely 1.0, 1.5 and 2.0. Units - are defined by the headers of the image files. These are typically millimeters for radiological images. - - spline_order: int, optional, default: 3 - Sets the spline order used for spline interpolation. mirp uses `scipy.ndimage.map_coordinates - <https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.map_coordinates.html#scipy.ndimage - .map_coordinates>`_ internally. Spline orders 0, 1, and 3 refer to nearest neighbour, linear interpolation - and cubic interpolation, respectively. - - anti_aliasing: bool, optional, default: true - Determines whether to perform antialiasing, which is done to mitigate aliasing artifacts when downsampling. - - smoothing_beta: float, optional, default: 0.98 - Determines the smoothness of the Gaussian filter used for anti-aliasing. A value of 1.00 equates to no - antialiasing, with lower values producing increasingly smooth imaging. Values above 0.90 are recommended. - - **kwargs: dict, optional - Unused keyword arguments. - """ - - def __init__( - self, - by_slice: bool, - new_spacing: None | float | int | list[int] | list[float] | list[list[float]] | list[list[int]] = None, - spline_order: int = 3, - anti_aliasing: bool = True, - smoothing_beta: float = 0.98, - **kwargs - ): - - # Set interpolate parameter. - self.interpolate: bool = new_spacing is not None - - # Check if the spline order is valid. - if spline_order < 0 or spline_order > 5: - raise ValueError( - f"The interpolation spline order should be an integer between 0 and 5. Found: {spline_order}") - - # Set spline order for the interpolating spline. - self.spline_order: int = spline_order - - if self.interpolate: - # Parse value to list of floating point values to facilitate checks. - if isinstance(new_spacing, (int, float)): - new_spacing = [new_spacing] - - # Check if nested list elements are present. 
- if any(isinstance(ii, Iterable) for ii in new_spacing): - new_spacing = [ - self._check_new_sample_spacing(by_slice=by_slice, new_spacing=new_spacing_element) - for new_spacing_element in new_spacing - ] - - else: - new_spacing = [self._check_new_sample_spacing(by_slice=by_slice, new_spacing=new_spacing)] - - # Check that new spacing is now a nested list. - if not all(isinstance(ii, list) for ii in new_spacing): - raise TypeError(f"THe new_spacing variable should now be represented as a nested list.") - - # Set spacing for resampling. Note that new_spacing should now either be None or a nested list, i.e. a list - # containing other lists. - self.new_spacing: None | list[list[float | None]] = new_spacing - - # Set anti-aliasing. - self.anti_aliasing: bool = anti_aliasing - - # Check that smoothing beta lies between 0.0 and 1.0. - if anti_aliasing: - if smoothing_beta <= 0.0 or smoothing_beta > 1.0: - raise ValueError( - f"The value of the smoothing_beta parameter should lie between 0.0 and 1.0, " - f"not including 0.0. Found: {smoothing_beta}") - - # Set smoothing beta. - self.smoothing_beta: float = smoothing_beta - - @staticmethod - def _check_new_sample_spacing(by_slice, new_spacing): - # Checks whether sample spacing is correctly set, and parses it. - - # Parse value to list of floating point values to facilitate checks. - if isinstance(new_spacing, (int, float)): - new_spacing = [new_spacing] - - # Convert to floating point values. - new_spacing: list[float | None] = [float(new_spacing_element) for new_spacing_element in new_spacing] - - if by_slice: - # New spacing is expect to consist of at most two values when the experiment is based on slices. A - # placeholder for the z-direction is set here. - if len(new_spacing) == 1: - # This creates isotropic spacing. - new_spacing = [None, new_spacing[0], new_spacing[0]] - - elif len(new_spacing) == 2: - # Insert a placeholder for the z-direction. - new_spacing.insert(0, None) - - else: - raise ValueError( - f"The desired voxel spacing for in-slice resampling should consist of two " - f"elements. Found: {len(new_spacing)} elements.") - else: - if len(new_spacing) == 1: - # This creates isotropic spacing. - new_spacing = [new_spacing[0], new_spacing[0], new_spacing[0]] - - elif len(new_spacing) == 3: - # Do nothing. - pass - - else: - raise ValueError( - f"The desired voxel spacing for volumetric resampling should consist of three " - f"elements. Found: {len(new_spacing)} elements.") - - return new_spacing
- - - -def get_image_interpolation_settings() -> list[dict[str, Any]]: - return [ - setting_def("new_spacing", "float", to_list=True, test=[1.0, 1.0, 1.0]), - setting_def("spline_order", "int", test=2), - setting_def("anti_aliasing", "bool", test=False), - setting_def("smoothing_beta", "float", test=0.75) - ] - - -
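The ``new_spacing`` parameter documented above also accepts a nested list, which resamples the same image to several voxel spacings in one pass. A hedged sketch, assuming these keyword arguments are passed through to ``ImageInterpolationSettingsClass`` as described in the configuration documentation; paths are placeholders.

.. code-block:: python

    from mirp import extract_images

    # Sketch: resample the same image to isotropic 1.0 mm and 2.0 mm voxel
    # spacings in a single call by passing a nested list, as described in the
    # new_spacing docstring above. Paths are placeholders.
    images = extract_images(
        image="path to image",
        mask="path to mask",
        new_spacing=[[1.0], [2.0]],
        spline_order=3,
        anti_aliasing=True
    )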
-[docs] -@dataclass -class MaskInterpolationSettingsClass: - """ - Parameters related to mask interpolation / resampling. MIRP registers the mask to an interpolated image based, - and fewer parameters can be set compared to image interpolation / resampling ( - :class:`~mirp.settings.settingsInterpolation.ImageInterpolationSettingsClass`). - - Parameters - ---------- - roi_spline_order: int, optional, default: 1 - Sets the spline order used for spline interpolation. mirp uses `scipy.ndimage.map_coordinates - <https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.map_coordinates.html#scipy.ndimage - .map_coordinates>`_ internally. Spline orders 0, 1, and 3 refer to nearest neighbour, linear interpolation - and cubic interpolation, respectively. - - roi_interpolation_mask_inclusion_threshold: float, optional, default: 0.5 - Threshold for partially masked voxels after interpolation. All voxels with a value equal to or greater than - this threshold are assigned to the mask. - - **kwargs: dict, optional - Unused keyword arguments. - """ - - def __init__( - self, - roi_spline_order: int = 1, - roi_interpolation_mask_inclusion_threshold: float = 0.5, - **kwargs): - - # Check if the spline order is valid. - if roi_spline_order < 0 or roi_spline_order > 5: - raise ValueError( - f"The interpolation spline order for the ROI should be an integer between 0 and 5. Found:" - f" {roi_spline_order}") - - # Set spline order. - self.spline_order = roi_spline_order - - # Check if the inclusion threshold is between 0 and 1. - if roi_interpolation_mask_inclusion_threshold <= 0.0 or roi_interpolation_mask_inclusion_threshold > 1.0: - raise ValueError( - f"The inclusion threshold for the ROI mask should be between 0.0 and 1.0, excluding 0.0. " - f"Found: {roi_interpolation_mask_inclusion_threshold}") - - self.incl_threshold = roi_interpolation_mask_inclusion_threshold
- - - -def get_mask_interpolation_settings() -> list[dict[str, Any]]: - return [ - setting_def("roi_spline_order", "int", xml_key="spline_order", class_key="spline_order", test=2), - setting_def("roi_interpolation_mask_inclusion_threshold", "float", xml_key="incl_threshold", - class_key="incl_threshold", test=0.25) - ] -
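As an illustrative sketch (not part of the source above), the two mask interpolation parameters can be set as keyword arguments alongside image resampling; the values below are arbitrary examples and the paths are placeholders.

.. code-block:: python

    from mirp import extract_features

    # Sketch: resample to 1.0 mm voxels, interpolate the mask linearly
    # (spline order 1), and assign voxels whose interpolated mask value is at
    # least 0.5 to the mask. Paths are placeholders.
    feature_data = extract_features(
        image="path to image",
        mask="path to mask",
        new_spacing=1.0,
        roi_spline_order=1,
        roi_interpolation_mask_inclusion_threshold=0.5,
        base_discretisation_method="fixed_bin_number",
        base_discretisation_n_bins=32
    )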
- - - - \ No newline at end of file diff --git a/docs/_modules/mirp/settings/settingsMaskResegmentation.html b/docs/_modules/mirp/settings/settingsMaskResegmentation.html deleted file mode 100644 index e4b371e2..00000000 --- a/docs/_modules/mirp/settings/settingsMaskResegmentation.html +++ /dev/null @@ -1,201 +0,0 @@ - - - - - - mirp.settings.settingsMaskResegmentation — mirp 2.1.0 documentation - - - - - - - - - - - - - - - -

Source code for mirp.settings.settingsMaskResegmentation

-from typing import Any
-from dataclasses import dataclass
-from mirp.settings.utilities import setting_def
-
-import numpy as np
-
-
-
-[docs] -@dataclass -class ResegmentationSettingsClass: - """ - Parameters related to mask resegmentation. Resegmentation is used to remove parts of the mask that correspond to - undesired intensities that should be excluded, e.g. those corresponding to air. Resegmentation based on an - intensity range is also required for using *Fixed Bin Size* discretisation to set the lower bound of the first bin. - - .. note:: - Even though intensity range resegmentation is usually required to perform *Fixed Bin Size* discretisation, - default values are available for computed tomography (CT) and positron emission tomography (PET) imaging, - and are set to -1000.0 Hounsfield Units and 0.0 SUV, respectively. - - Parameters - ---------- - Sets parameters related to resegmentation of the segmentation mask. - - resegmentation_intensity_range: list of float, optional - Intensity threshold for threshold-based re-segmentation ("threshold" and "range"). If set, requires two - values for lower and upper range respectively. The upper range value can also be np.nan for half-open ranges. - - resegmentation_sigma: float, optional - Number of standard deviations for outlier-based intensity re-segmentation ("sigma" and "outlier"). - - **kwargs: dict, optional - Unused keyword arguments. - """ - def __init__( - self, - resegmentation_intensity_range: None | list[float] = None, - resegmentation_sigma: None | float = None, - **kwargs - ): - resegmentation_method = [] - if resegmentation_sigma is None and resegmentation_intensity_range is None: - resegmentation_method += ["none"] - if resegmentation_intensity_range is not None: - resegmentation_method += ["range"] - if resegmentation_sigma is not None: - resegmentation_method += ["sigma"] - - # Set resegmentation method. - self.resegmentation_method: list[str] = resegmentation_method - - # Set default value. - if resegmentation_intensity_range is None: - # Cannot define a proper range. - resegmentation_intensity_range = [np.nan, np.nan] - - if not isinstance(resegmentation_intensity_range, list): - raise TypeError( - f"The resegmentation_intensity_range parameter should be a list with exactly two " - f"values. Found: an object that is not a list.") - - if len(resegmentation_intensity_range) != 2: - raise ValueError( - f"The resegmentation_intensity_range parameter should be a list with exactly two " - f"values. Found: list with {len(resegmentation_intensity_range)} values.") - - if not all(isinstance(ii, float) for ii in resegmentation_intensity_range): - raise TypeError( - f"The resegmentation_intensity_range parameter should be a list with exactly two " - f"values. Found: one or more values that are not floating point values.") - - self.intensity_range: None | tuple[Any, Any] = tuple(resegmentation_intensity_range) if \ - resegmentation_intensity_range is not None else None - - # Set default value. - if resegmentation_sigma is None: - resegmentation_sigma = 3.0 - - # Check that sigma is not negative. - if resegmentation_sigma < 0.0: - raise ValueError(f"The resegmentation_sigma parameter can not be negative. Found: {resegmentation_sigma}") - - self.sigma: float = resegmentation_sigma
- - - -def get_mask_resegmentation_settings() -> list[dict[str, Any]]: - return [ - setting_def( - "resegmentation_intensity_range", "float", to_list=True, xml_key=["intensity_range", "g_thresh"], - class_key="intensity_range", test=[-10.0, 30.0] - ), - setting_def("resegmentation_sigma", "float", xml_key="sigma", class_key="sigma", test=1.0) - ] -
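The class above supports both range-based and outlier (sigma) based resegmentation, and the two may be combined. A minimal sketch, assuming these keyword arguments are forwarded to ``ResegmentationSettingsClass``; paths are placeholders.

.. code-block:: python

    from mirp import extract_features

    # Sketch: restrict the mask to a soft-tissue intensity range and
    # additionally remove outliers beyond 3 standard deviations, using the
    # parameters defined above. Paths are placeholders.
    feature_data = extract_features(
        image="path to CT image",
        mask="path to CT mask",
        image_modality="CT",
        resegmentation_intensity_range=[-200.0, 200.0],
        resegmentation_sigma=3.0,
        base_discretisation_method="fixed_bin_size",
        base_discretisation_bin_width=25.0
    )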
- - - - \ No newline at end of file diff --git a/docs/_modules/mirp/settings/settingsPerturbation.html b/docs/_modules/mirp/settings/settingsPerturbation.html deleted file mode 100644 index 6511dadb..00000000 --- a/docs/_modules/mirp/settings/settingsPerturbation.html +++ /dev/null @@ -1,370 +0,0 @@ - - - - - - mirp.settings.settingsPerturbation — mirp 2.1.0 documentation - - - - - - - - - - - - - - - -

Source code for mirp.settings.settingsPerturbation

-from typing import Union, List, Any
-from dataclasses import dataclass
-from mirp.settings.utilities import setting_def
-
-
-
-[docs] -@dataclass -class ImagePerturbationSettingsClass: - """ - Parameters related to image and mask perturbation / augmentation. By default images and masks are not perturbed or - augmented. - - Parameters - ---------- - - crop_around_roi: bool, optional, default: False - Determines whether the image may be cropped around the regions of interest. Setting - this to True may speed up computation and save memory. - - crop_distance: float, optional, default: 150.0 - Physical distance around the mask that should be maintained when cropping the image. When using convolutional - kernels for filtering an image, we recommend to leave some distance to prevent boundary effects. A crop - distance of 0.0 crops the image tightly around the mask. - - perturbation_noise_repetitions: int, optional, default: 0 - Number of repetitions where noise is randomly added to the image. A value of 0 means that no noise will be - added. - - perturbation_noise_level: float, optional, default: None - Set the noise level in intensity units. This determines the width of the normal distribution used to generate - random noise. If None (default), noise is determined from the image itself. - - perturbation_rotation_angles: float or list of float, optional, default: 0.0 - Angles (in degrees) over which the image and mask are rotated. This rotation is only in the x-y (axial) - plane. Multiple angles can be provided to create images with different rotations. - - perturbation_translation_fraction: float or list of float, optional, default: 0.0 - Sub-voxel translation distance fractions of the interpolation grid. This forces the interpolation grid to - shift slightly and interpolate at different points. Multiple values can be provided. All values should be - between 0.0 and 1.0. - - perturbation_roi_adapt_type: {"fraction", "distance"}, optional, default: "distance" - Determines how the mask is grown or shrunk. Can be either "fraction" or "distance". "fraction" is used to - grow or shrink the mask by a certain fraction (see the ``perturbation_roi_adapt_size`` parameter). - "distance" is used to grow or shrink the mask by a certain physical distance, defined using the - ``perturbation_roi_adapt_size`` parameter. - - perturbation_roi_adapt_size: float or list of float, optional, default: 0.0 - Determines the extent of growth/shrinkage of the ROI mask. The use of this parameter depends on the - growth/shrinkage type (``perturbation_roi_adapt_type``), For "distance", this parameter defines - growth/shrinkage in physical units, typically mm. For "fraction", this parameter defines growth/shrinkage in - volume fraction (e.g. a value of 0.2 grows the mask by 20%). For either type, positive values indicate growing - the mask, whereas negative values indicate its shrinkage. Multiple values can be provided to perturb the - volume of the mask. - - perturbation_roi_adapt_max_erosion: float, optional, default: 0.8 - Limits shrinkage of the mask by distance-based adaptations to avoid forming empty masks. Defined as fraction of - the original volume, e.g. a value of 0.8 prevents shrinking the mask below 80% of its original volume. Only - used when ``perturbation_roi_adapt_type=="distance"``. - - perturbation_randomise_roi_repetitions: int, optional, default: 0.0 - Number of repetitions where the mask is randomised using supervoxel-based randomisation. - - roi_split_boundary_size: float or list of float, optional, default: 0.0 - Width of the rim used for splitting the mask into bulk and rim masks, in physical dimensions. 
Multiple values - can be provided to generate rims of different widths. - - roi_split_max_erosion: float, optional, default: 0.6 - Determines the minimum volume of the bulk mask when splitting the original mask into bulk and rim sections. - Fraction of the original volume, e.g. 0.6 means that the bulk contains at least 60% of the original mask. - - **kwargs: dict, optional - Unused keyword arguments. - """ - - def __init__( - self, - crop_around_roi: bool = False, - crop_distance: float = 150.0, - perturbation_noise_repetitions: int = 0, - perturbation_noise_level: Union[None, float] = None, - perturbation_rotation_angles: Union[None, List[float], float] = 0.0, - perturbation_translation_fraction: Union[None, List[float], float] = 0.0, - perturbation_roi_adapt_type: str = "distance", - perturbation_roi_adapt_size: Union[None, List[float], float] = 0.0, - perturbation_roi_adapt_max_erosion: float = 0.8, - perturbation_randomise_roi_repetitions: int = 0, - roi_split_boundary_size: Union[None, List[float], float] = 0.0, - roi_split_max_erosion: float = 0.6, - **kwargs - ): - - # Set crop_around_roi - self.crop_around_roi: bool = crop_around_roi - - # Check that crop distance is not negative. - if crop_distance < 0.0 and crop_around_roi: - raise ValueError(f"The cropping distance cannot be negative. Found: {crop_distance}") - - # Set crop_distance. - self.crop_distance: float = crop_distance - - # Check that noise repetitions is not negative. - perturbation_noise_repetitions = int(perturbation_noise_repetitions) - if perturbation_noise_repetitions < 0: - raise ValueError(f"The number of repetitions where noise is added to the image cannot be negative. Found: {perturbation_noise_repetitions}") - - # Set noise repetitions. - self.add_noise: bool = perturbation_noise_repetitions > 0 - self.noise_repetitions: int = perturbation_noise_repetitions - - # Check noise level. - if perturbation_noise_level is not None: - if perturbation_noise_level < 0.0: - raise ValueError(f"The noise level cannot be negative. Found: {perturbation_noise_level}") - - # Set noise level. - self.noise_level: Union[None, float] = perturbation_noise_level - - # Convert perturbation_rotation_angles to list, if necessary. - if not isinstance(perturbation_rotation_angles, list): - perturbation_rotation_angles = [perturbation_rotation_angles] - - # Check that the rotation angles are floating points. - if not all(isinstance(ii, float) for ii in perturbation_rotation_angles): - raise TypeError(f"Not all values for perturbation_rotation_angles are floating point values.") - - # Set rotation_angles. - self.rotation_angles: List[float] = perturbation_rotation_angles - - # Convert perturbation_translation_fraction to list, if necessary. - if not isinstance(perturbation_translation_fraction, list): - perturbation_translation_fraction = [perturbation_translation_fraction] - - # Check that the translation fractions are floating points. - if not all(isinstance(ii, float) for ii in perturbation_translation_fraction): - raise TypeError(f"Not all values for perturbation_translation_fraction are floating point values.") - - # Check that the translation fractions lie between 0.0 and 1.0. - if not all(0.0 <= ii < 1.0 for ii in perturbation_translation_fraction): - raise ValueError("Not all values for perturbation_translation_fraction lie between 0.0 and 1.0, " - "not including 1.0.") - - # Set translation_fraction. - self.translation_fraction: List[float] = perturbation_translation_fraction - - # Check roi adaptation type. 
- if perturbation_roi_adapt_type not in ["distance", "fraction"]: - raise ValueError(f"The perturbation ROI adaptation type should be one of 'distance' or 'fraction'. Found: {perturbation_roi_adapt_type}") - - # Set roi_adapt_type - self.roi_adapt_type: str = perturbation_roi_adapt_type - - # Convert to perturbation_roi_adapt_size to list. - if not isinstance(perturbation_roi_adapt_size, list): - perturbation_roi_adapt_size = [perturbation_roi_adapt_size] - - # Check that the adapt sizes are floating points. - if not all(isinstance(ii, float) for ii in perturbation_roi_adapt_size): - raise TypeError(f"Not all values for perturbation_roi_adapt_size are floating point values.") - - # Check that values do not go below 0. - if perturbation_roi_adapt_type == "fraction" and any([ii <= -1.0 for ii in perturbation_roi_adapt_size]): - raise ValueError("All values for perturbation_roi_adapt_size should be greater than -1.0. However, " - "one or more values were less.") - - # Set roi_adapt_size - self.roi_adapt_size: List[float] = perturbation_roi_adapt_size - - # Check that perturbation_roi_adapt_max_erosion is between 0.0 and 1.0. - if not 0.0 <= perturbation_roi_adapt_max_erosion <= 1.0: - raise ValueError(f"The perturbation_roi_adapt_max_erosion parameter must have a value between 0.0 and " - f"1.0. Found: {perturbation_roi_adapt_max_erosion}") - - # Set max volume erosion. - self.max_volume_erosion: float = perturbation_roi_adapt_max_erosion - - # Check that ROI randomisation representation is not negative. - perturbation_randomise_roi_repetitions = int(perturbation_randomise_roi_repetitions) - if perturbation_randomise_roi_repetitions < 0: - raise ValueError( - f"The number of repetitions where the ROI mask is randomised cannot be negative. Found: " - f"{perturbation_randomise_roi_repetitions}") - - # Set ROI mask randomisation repetitions. - self.randomise_roi: bool = perturbation_randomise_roi_repetitions > 0 - self.roi_random_rep: int = perturbation_randomise_roi_repetitions - - # Check that roi_split_max_erosion is between 0.0 and 1.0. - if not 0.0 <= roi_split_max_erosion <= 1.0: - raise ValueError(f"The roi_split_max_erosion parameter must have a value between 0.0 and " - f"1.0. Found: {roi_split_max_erosion}") - - # Division of roi into bulk and boundary - self.max_bulk_volume_erosion: float = roi_split_max_erosion - - # Convert roi_split_boundary_size to list, if necessary. - if not isinstance(roi_split_boundary_size, list): - roi_split_boundary_size = [roi_split_boundary_size] - - # Check that the translation fractions are floating points. - if not all(isinstance(ii, float) for ii in roi_split_boundary_size): - raise TypeError(f"Not all values for roi_split_boundary_size are floating point values.") - - # Check that the translation fractions lie between 0.0 and 1.0. - if not all(ii >= 0.0 for ii in roi_split_boundary_size): - raise ValueError("Not all values for roi_split_boundary_size are positive.") - - # Set roi_boundary_size. - self.roi_boundary_size: List[float] = roi_split_boundary_size - - # Initially local variables - self.translate_x: Union[None, float] = None - self.translate_y: Union[None, float] = None - self.translate_z: Union[None, float] = None
- - - -def get_perturbation_settings() -> list[dict[str, Any]]: - return [ - setting_def("crop_around_roi", "bool", xml_key=["crop_around_roi", "resect"], test=True), - setting_def("crop_distance", "float", test=10.0), - setting_def( - "perturbation_noise_repetitions", "int", xml_key="noise_repetitions", - class_key="noise_repetitions", test=10 - ), - setting_def( - "perturbation_noise_level", "float", xml_key="noise_level", class_key="noise_level", test=0.75 - ), - setting_def( - "perturbation_rotation_angles", "float", to_list=True, xml_key=["rotation_angles", "rot_angles"], - class_key="rotation_angles", test=[-33.0, 33.0] - ), - setting_def( - "perturbation_translation_fraction", "float", to_list=True, - xml_key=["translation_fraction", "translate_frac"], class_key="translation_fraction", test=[0.25, 0.75] - ), - setting_def( - "perturbation_roi_adapt_type", "str", xml_key="roi_adapt_type", class_key="roi_adapt_type", - test="fraction" - ), - setting_def( - "perturbation_roi_adapt_size", "float", to_list=True, xml_key="roi_adapt_size", - class_key="roi_adapt_size", test=[0.8, 1.0, 1.2] - ), - setting_def( - "perturbation_roi_adapt_max_erosion", "float", xml_key=["roi_adapt_max_erosion", "eroded_vol_fract"], - class_key="max_volume_erosion", test=0.2 - ), - setting_def( - "perturbation_randomise_roi_repetitions", "int", xml_key="roi_random_rep", - class_key="roi_random_rep", test=100 - ), - setting_def( - "roi_split_boundary_size", "float", to_list=True, xml_key="roi_boundary_size", - class_key="roi_boundary_size", test=[2.0, 5.0] - ), - setting_def( - "roi_split_max_erosion", "float", xml_key=["roi_split_max_erosion", "bulk_min_vol_fract"], - class_key="max_bulk_volume_erosion", test=0.2 - ) - ] - -
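The perturbation parameters above are typically used for robustness analysis, where features are computed repeatedly under small changes to the image and mask. A hedged sketch, assuming the keyword arguments are forwarded to ``ImagePerturbationSettingsClass``; paths are placeholders, and each combination of rotation angle and volume adaptation is expected to produce its own set of feature values.

.. code-block:: python

    from mirp import extract_features

    # Sketch: rotate the image in-plane by -15, 0 and +15 degrees and shrink or
    # grow the mask volume by 20%, using the parameters defined above.
    # Paths are placeholders.
    feature_data = extract_features(
        image="path to image",
        mask="path to mask",
        perturbation_rotation_angles=[-15.0, 0.0, 15.0],
        perturbation_roi_adapt_type="fraction",
        perturbation_roi_adapt_size=[-0.2, 0.0, 0.2],
        base_discretisation_method="fixed_bin_number",
        base_discretisation_n_bins=32
    )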
- - - - \ No newline at end of file diff --git a/docs/_modules/mirp/settings/transformation_parameters.html b/docs/_modules/mirp/settings/transformation_parameters.html index ec2e02b1..7e05ea74 100644 --- a/docs/_modules/mirp/settings/transformation_parameters.html +++ b/docs/_modules/mirp/settings/transformation_parameters.html @@ -3,7 +3,7 @@ - mirp.settings.transformation_parameters — mirp 2.2.1 documentation + mirp.settings.transformation_parameters — mirp 2.2.3 documentation @@ -14,7 +14,7 @@ - + @@ -51,7 +51,6 @@

Deep Dive

Contributing

diff --git a/docs/_modules/mirp/utilities/config_utilities.html b/docs/_modules/mirp/utilities/config_utilities.html index 327ba543..6d5fb795 100644 --- a/docs/_modules/mirp/utilities/config_utilities.html +++ b/docs/_modules/mirp/utilities/config_utilities.html @@ -3,7 +3,7 @@ - mirp.utilities.config_utilities — mirp 2.2.1 documentation + mirp.utilities.config_utilities — mirp 2.2.3 documentation @@ -14,7 +14,7 @@ - + @@ -51,7 +51,6 @@

Deep Dive

Contributing

diff --git a/docs/_sources/mirp.deepLearningPreprocessing.rst.txt b/docs/_sources/mirp.deepLearningPreprocessing.rst.txt deleted file mode 100644 index 4b3db28e..00000000 --- a/docs/_sources/mirp.deepLearningPreprocessing.rst.txt +++ /dev/null @@ -1,32 +0,0 @@ -Preprocess images for deep learning -=================================== - -MIRP can be used to preprocess images for deep learning. Images are processed using the standard IBSI-compliant image -processing workflow, with a final cropping step (if any). - -The deep learning preprocessing function comes in two versions: - -* :func:`~mirp.deep_learning_preprocessing.deep_learning_preprocessing`: conventional function that processes images. -* :func:`~mirp.deep_learning_preprocessing.deep_learning_preprocessing_generator`: generator that yields processed images. - -Example -------- - -MIRP can be used to crop images, e.g. to make them conform to the input of convolutional neural networks: - -.. code-block:: python - - from mirp import deep_learning_preprocessing - - processed_data = deep_learning_preprocessing( - image="path to image", - mask="path to mask", - crop_size=[50, 224, 224] - ) - -API documentation ------------------ -.. automodule:: mirp.deep_learning_preprocessing - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/mirp.extractFeaturesAndImages.rst.txt b/docs/_sources/mirp.extractFeaturesAndImages.rst.txt deleted file mode 100644 index fdd5b6c2..00000000 --- a/docs/_sources/mirp.extractFeaturesAndImages.rst.txt +++ /dev/null @@ -1,179 +0,0 @@ -Process image and compute quantitative image features -===================================================== - -Two of the main uses for MIRP are to process images and compute quantitative features from images. Both use the same -standardized, IBSI 1 and IBSI 2 compliant, workflow. Two versions of the image processing and feature computation -function exist: - -* :func:`~mirp.extract_features_and_images.extract_features_and_images`: conventional function that processes images and - computes features. -* :func:`~mirp.extract_features_and_images.extract_features_and_images_generator`: generator that yields processed - images and features computed therefrom. - -For convenience, the above functions are wrapped to allow for only computing feature values (without exporting -images) and only processing images (without computing features): - -* :func:`~mirp.extract_features_and_images.extract_features`: conventional function that only computes features. -* :func:`~mirp.extract_features_and_images.extract_features_generator`: generator that only yields feature values. -* :func:`~mirp.extract_features_and_images.extract_features_and_images`: conventional function that only processes images. -* :func:`~mirp.extract_features_and_images.extract_features_and_images_generator`: generator that yields processed images. - -Examples --------- - -MIRP can compute features from regions of interest in images. The simplest example is: - -.. code-block:: python - - from mirp import extract_features - - feature_data = extract_features( - image="path to image", - mask="path to mask", - base_discretisation_method="fixed_bin_number", - base_discretisation_n_bins=32 - ) - -The ``base_discretisation_method`` and its corresponding parameters are required as long as any texture or -intensity-histogram features are involved. - -A more realistic example involves interpolation to ensure that voxel spacing is the same for all images in a dataset. 
-For example, a positron emission tomography (PET) dataset may be resampled to 3 by 3 by 3 mm isotropic voxels. This -is achieved by providing the ``new_spacing`` argument, i.e. ``new_spacing=3.0`` or ``new_spacing=[3.0, 3.0, 3.0]``. - -.. code-block:: python - - from mirp import extract_features - - feature_data = extract_features( - image="path to PET image", - mask="path to PET mask", - image_modality="PET", - new_spacing=3.0, - base_discretisation_method="fixed_bin_number", - base_discretisation_n_bins=32 - ) - -Here, ``image_modality="PET"`` is used to declare that the image is a PET image. If this is a DICOM image, this -argument is not necessary -- the modality can be inferred from the metadata. - -Sometimes, in-plane resolution is much higher than axial resolution. For example, in (older) computed tomography (CT) -images, in-plane resolution may be 1 by 1 mm, but the distance between slices can be 7 mm or greater. -Resampling to isotropic 1 by 1 by 1 mm voxels causes considerable data to be inferred between slices, -which may not be desirable. In that case, images may be better processed by slice-by-slice (*2D*). -This is achieved by providing the ``by_slice`` argument, i.e. ``by_slice=True``. - -.. code-block:: python - - from mirp import extract_features - - feature_data = extract_features( - image="path to CT image", - mask="path to CT mask", - image_modality="CT", - by_slice=True, - new_spacing=1.0, - base_discretisation_method="fixed_bin_number", - base_discretisation_n_bins=32 - ) - -In the above example ``new_spacing=1.0`` causes all images to be resampled in-plane to a 1 mm resolution. - -The previous examples used the *Fixed Bin Number* to discretise intensities within the mask into a fixed number of bins. -For some imaging modalities, intensities carry a physical (or at least calibrated) meaning, such as Hounsfield units in -computed tomography and standardised uptake values in positron emission tomography. For these *Fixed Bin Size* (also -known as *Fixed Bin Width*) can be interesting, as this creates a mapping between intensities and bins that is -consistent across the dataset. MIRP sets the lower bound of the initial bin using the resegmentation range, or in -its absence, a default value (if any). - -Below we compute features from a computed tomography image using a *Fixed Bin Size* discretisation method. -Because the resegmentation range is not set, the lower bound of the initial bin defaults to -1000 Hounsfield Units. - -.. code-block:: python - - from mirp import extract_features - - feature_data = extract_features( - image="path to CT image", - mask="path to CT mask", - image_modality="CT", - new_spacing=1.0, - base_discretisation_method="fixed_bin_size", - base_discretisation_bin_width=25.0 - ) - -If the region of interest contained in the mask in the above example covers soft tissue, this default might not be good. -We can change this by providing the ``resegmentation_intensity_range`` argument. Here, we provide a window more fitting -for soft tissues: ``resegmentation_intensity_range=[-200.0, 200.0]``. Thus the lower bound of the initial bin is set to --200 Hounsfield Units, and 16 bins total are formed. - -.. 
code-block:: python - - from mirp import extract_features - - feature_data = extract_features( - image="path to CT image", - mask="path to CT mask", - image_modality="CT", - new_spacing=1.0, - resegmentation_intensity_range=[-200.0, 200.0], - base_discretisation_method="fixed_bin_size", - base_discretisation_bin_width=25.0 - ) - -The above examples all compute features from the base image. Filters can be applied to images to enhance patterns such -as edges. In the example below, we compute features from a Laplacian-of-Gaussian filtered image: - -.. code-block:: python - - from mirp import extract_features - - feature_data = extract_features( - image="path to image", - mask="path to mask", - new_spacing=1.0, - base_discretisation_method="fixed_bin_size", - base_discretisation_bin_width=25.0, - filter_kernels="laplacian_of_gaussian", - laplacian_of_gaussian_sigma=2.0 - ) - -By default, only statistical features are computed from filtered images, and features are still extracted from the -base image. You can change this by specifying ``base_feature_families="none"`` (to prevent computing features from -the base image) and specifying ``response_map_feature_families``. In the example below, we compute both statistical -features and intensity histogram features. - -.. code-block:: python - - from mirp import extract_features - - feature_data = extract_features( - image="path to image", - mask="path to mask", - new_spacing=1.0, - base_feature_families="none", - response_map_feature_families=["statistics", "intensity_histogram"], - filter_kernels="laplacian_of_gaussian", - laplacian_of_gaussian_sigma=2.0 - ) - -Even though intensity histogram features require discretisation, you don't have to provide a discretisation method -and associated parameters. This is because for many filters, intensities in the filtered images no longer represent a -measurable quantity. Hence a *Fixed Bin Number* algorithm is used by default, with 16 bins. These parameters can be -changed using the ``response_map_discretisation_method`` and ``response_map_discretisation_n_bins`` arguments. - -API documentation ------------------ - -.. autofunction:: mirp.extract_features_and_images.extract_features_and_images - -.. autofunction:: mirp.extract_features_and_images.extract_features_and_images_generator - -.. autofunction:: mirp.extract_features_and_images.extract_features - -.. autofunction:: mirp.extract_features_and_images.extract_features_generator - -.. autofunction:: mirp.extract_features_and_images.extract_images - -.. autofunction:: mirp.extract_features_and_images.extract_images_generator - diff --git a/docs/_sources/mirp.extractImageParameters.rst.txt b/docs/_sources/mirp.extractImageParameters.rst.txt deleted file mode 100644 index 350fd3bf..00000000 --- a/docs/_sources/mirp.extractImageParameters.rst.txt +++ /dev/null @@ -1,30 +0,0 @@ -Extract image metadata -====================== - -Image metadata, such as acquisition and reconstruction parameters, are interesting to report. To facilitate their -reporting, MIRP can automatically extract relevant parameters from metadata. - -.. note:: - Many relevant parameters can only extracted from DICOM files, because other file types lack the - corresponding metadata. - -Example -------- - -Parameters of a single image can be extracted from their metadata as follows: - -.. code-block:: python - - from mirp import extract_image_parameters - - image_parameters = extract_image_parameters( - image="path to image" - ) - -API documentation ------------------ - -.. 
automodule:: mirp.extract_image_parameters - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/mirp.extractMaskLabels.rst.txt b/docs/_sources/mirp.extractMaskLabels.rst.txt deleted file mode 100644 index fd6ba487..00000000 --- a/docs/_sources/mirp.extractMaskLabels.rst.txt +++ /dev/null @@ -1,26 +0,0 @@ -Extract mask labels -=================== - -Mask files can contain labels for multiple regions of interest. You can use the -:func:`~mirp.extract_mask_labels.extract_mask_labels` function to obtain these labels. - -Example -------- - -Region of interest labels can be extract from mask files as follows: - -.. code-block:: python - - from mirp import extract_mask_labels - - mask_labels = extract_mask_labels( - mask="path to mask" - ) - -API documentation ------------------ - -.. automodule:: mirp.extract_mask_labels - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_sources/mirp.importData.rst.txt b/docs/_sources/mirp.importData.rst.txt deleted file mode 100644 index adbc3da9..00000000 --- a/docs/_sources/mirp.importData.rst.txt +++ /dev/null @@ -1,301 +0,0 @@ -Configure image and mask import -=============================== - -Many relevant MIRP functions require images, masks or both as input. This section provides details on how image and -mask import is configured. - -Specifying input ----------------- - -MIRP processes and analyses images and masks. There are multiple ways to provide images and masks: - -* By specifying the directory where images and/or masks are found: - * **Nested flat layout**: In a nested flat layout all images and masks are separated for each sample. For - example, an image dataset of 128 samples may be organised as follows:: - - image_root_directory - ├─ sample_001 - │ └─ ... - ├─ ... - └─ sample_127 - └─ image_sub_folder - ├─ CT_dicom_000.dcm - ├─ ... - └─ CT_dicom_255.dcm - └─ mask.dcm - - Images and mask files are directly under the sample directory. Only one keyword argument is required: - - .. code-block:: python - - some_function( - ..., - image = "image_root_directory", - ... - ) - - MIRP is generally able to determine which files are images and which files are masks. However, there may be - cases where MIRP is unable to determine if a file is an image or a mask. In those cases, additional keyword - arguments may be provided: - - .. code-block:: python - - some_function( - ..., - image = "image_root_directory", - image_name = "CT_dicom_*", - mask_name = "mask" - ... - ) - - Here, ``image_name`` and ``mask_name`` contain patterns for image and mask files, respectively. ``"CT_dicom_*"`` - contains a wildcard character (``*``) that matches any pattern starting with ``"CT_dicom_"``. File extensions are - never of the pattern. - - * **Fully nested structure**: In a nested structure all images and masks are separated for each. Unlike the above - example, image and mask data may be organised into different subdirectory structures:: - - image_root_directory - ├─ sample_001 - │ └─ image_sub_folder - │ │ └─ ... - │ └─ mask_sub_folder - │ └─ ... - ├─ ... - └─ sample_127 - └─ image_sub_folder - │ ├─ CT_dicom_000.dcm - │ ├─ ... - │ └─ CT_dicom_255.dcm - └─ mask_sub_folder - └─ mask.dcm - - Here the directory for each sample contains consistently named subdirectory structures (``image_sub_folder`` - and ``mask_sub_folder``), that contains the set of DICOM images and a mask, respectively. Then the following - keyword arguments may be specified: - - .. 
code-block:: python - - some_function( - ..., - image = "image_root_directory", - image_sub_folder = "image_sub_folder", - mask_sub_folder = "mask_sub_folder", - ... - ) - - The ``mask`` keyword argument is automatically assumed to be equal to ``image``, i.e. images and masks are - under the same root directory. If this is not the case, ``mask`` should be specified as well. - - .. note:: - MIRP will interpret the name of the directory that is neither part of the root directory or the subdirectory - structures as the sample name, unless the sample name can be determined from metadata (i.e. DICOM files). In - the example above, sample names based on the directory structure would be ``"sample_001"`` to ``"sample_127"``. - - * **Flat layout**: In a flat layout, all image and mask files are contained in the same directory:: - - image_root_directory - ├─ sample_001_CT_dicom_000.dcm - ├─ ... - ├─ sample_001_CT_dicom_319.dcm - ├─ sample_127_CT_dicom_000.dcm - ├─ ... - ├─ sample_127_CT_dicom_255.dcm - ├─ sample_001_mask.dcm - ├─ ... - └─ sample_127_mask.dcm - - Flat layouts are somewhat more challenging for MIRP, as sample identifiers have to be inferred, and images and - masks may be hard to associate. For DICOM images sample names and other association data typically can be - obtained from the DICOM metadata. For other types of images, e.g. NIfTI or numpy, in a flat layout, - ``image_name`` and ``mask_name`` should be provided: - - .. code-block:: python - - some_function( - ..., - image = "image_root_directory", - image_name = "#_CT_dicom_*", - mask_name = "#_mask", - ... - ) - - The above example contain two wildcards: ``#`` and ``*`` that fulfill different roles. While ``*`` matches any - pattern, ``#`` matches any pattern and uses that pattern as the sample name. This way, sample identifiers can - be determined for flat layouts. - -* By providing a direct path to image and mask files: - * **Single image and mask**: A path to an image and mask may be provided as follows: - - .. code-block:: python - - some_function( - ..., - image = "image_directory/image.nii.gz", - mask = "mask_directory/mask.nii.gz", - ... - ) - - Here ``"image.nii.gz"`` is an image file in NIfTI format, located in the ``"image_directory"`` directory. - Similarly, ``"mask.nii.gz"`` is a mask file (containing integer-value labels) that is located in the - ``"mask_directory"`` directory. - - * **Multiple images and masks**: Multiple images and masks can be provided as lists. - - .. code-block:: python - - some_function( - ..., - image = ["image_directory/image_001.nii.gz", "image_directory/image_002.nii.gz"], - mask = ["mask_directory_001/mask.nii.gz", "mask_directory_002/mask.nii.gz"], - ... - ) - - .. note:: - It is possible to provide multiple masks for each image as long as their is some way to associate the image - with its masks, e.g. on sample name or frame of reference. - - .. note:: - In absence of any further identifiers for associating images and masks, MIRP will treat image and mask lists of - equal length as being sorted by element, and associate the first mask with the first image, the second mask - with the second image, and so forth. - -* By providing the image and mask directly: - Images and masks can be provided directly using ``numpy.ndarray`` objects. - - .. warning:: - Even though images can be directly provided as ``numpy`` arrays, this should only be done if all data has - the same (physical) resolution, or if physical resolution does not matter. 
This is because ``numpy`` arrays only - contain values, and no metadata concerning pixel or voxel spacing. Internally, MIRP will use a default value of - 1.0 × 1.0 × 1.0. - - * **Single image and mask**: Let ``numpy_image`` and ``numpy_mask`` be two ``numpy`` arrays with the same - dimension. Then, these objects can be provided as follows: - - .. code-block:: python - - some_function( - ..., - image = numpy_image, - mask = numpy_mask, - ... - ) - - * **Multiple images and masks**: Multiple images and masks can be provided as lists of ``numpy`` arrays: - - .. code-block:: python - - some_function( - ..., - image = [numpy_image_001, numpy_image_002] - mask = [numpy_mask_001, numpy_mask_002], - ... - ) - - .. warning:: - While it is possible to provide multiple masks for each image, in practice there is no safe way to do so. The - only way to associate image and masks is by their image dimension, which may be the same for different images. - Hence, providing one mask per image is recommended. MIRP will treat image and mask lists of equal length as - being sorted by element, and associate the first mask with the first image, the second mask with the second - image, and so forth. - -* By specifying the configuration in a stand-alone data ``xml`` file. An empty copy of the ``xml`` file can be - created using :func:`mirp.utilities.config_utilities.get_data_xml`. The tags of the``xml`` file are the same as the - arguments of :func:`~mirp.data_import.import_image_and_mask.import_image_and_mask`, that are listed below. - -Selecting specific images and masks ------------------------------------ -On occasion, input should be more selective. This can be done by specifying additional arguments: - -* Select specific samples using ``sample_name``: - Sample names can be provided as a list of strings to filter images and masks and exclude those that do not appear - in the provided list. - - .. note:: - If sample names cannot be determined from metadata, directory structure or file names, MIRP cannot filter image - and mask files using the provided sample names. In this case, should the list of provided sample names equal - that of the images, the provided sample names will be associated one-to-one with images. Otherwise, MIRP will - randomly generate sample names. - -* Select specific image and mask files based on their file names using ``image_name`` and ``mask_name``: - MIRP can filter image and mask files based on file names. ``image_name`` and ``mask_name`` arguments each take a - single string as argument. This string is matched exactly, and only file names that match that string are selected. - File extensions are ignored. - - To allow for some flexibility, wildcard characters can be used. MIRP recognises two types of wildcard characters: - ``*`` and ``#``. ``*`` denotes any character. For example, if files are named ``image_001.nii.gz``, - ``image_002.nii.gz`` and ``another_image_001.nii.gz``, using ``image_name="image_*"`` will select - ``image_001.nii.gz``, ``image_002.nii.gz``. Using ``image_name="*image_*"`` will select all three. - - The other wildcard character (``#``) denotes the part of the file name that is the sample name. For example, if - files are named ``sample_001_image_001.nii.gz``, ``sample_001_image_002.nii.gz`` and - ``sample_002_image_001.nii.gz``, using ``image_name="#_image_*"`` will select all three files, and assign - ``sample_001``, ``sample_001`` and ``sample_002`` as sample names, respectively. - - The ``mask_name`` argument functions exactly the same as ``image_name``. 
- -* Select the image and mask file types using ``image_file_type`` and ``mask_file_type``: - MIRP can filter image and mask files based on the file type. MIRP currently supports DICOM (``"dicom"``), NIfTI - (``"nifti"``), NRRD (``"nrrd"``) and numpy (``"numpy"``) files as file format. - -* Select image files based on image modality using ``image_modality``: - MIRP can filter image files based on the image modality. Aside from generic image modality, MIRP specifically - checks for the following modalities: - - * Computed tomography (CT): ``"ct"`` - - * Positron emission tomography (PET): ``"pet"`` or ``"pt"`` - - * Magnetic resonance imaging (MRI): ``"mri"`` or ``"mr"`` - - * Radiotherapy dose (RTDOSE): ``"rtdose"`` - - Images from other modalities are currently not fully supported, and a default ``"generic"`` image modality will - be assigned. - - .. note:: - Image modality is important because it adapts the image processing workflow to the requirements and - possibilities of each modality. For example, bias-field correction can only be performed on MR imaging, and - Hounsfield units are automatically rounded for CT imaging. - - .. warning:: - Only DICOM images contain metadata concerning image modality. Images from other file types are interpreted as - ``"generic"`` by default and cannot be filtered using ``image_modality``. For these image, the - ``image_modality`` argument sets the actual image modality. - -* Select mask files based on mask modality using ``mask_modality``: - MIRP can filter mask files based on the modality of the mask. Aside form generic masks, MIRP specifically checks for - DICOM radiotherapy structure (RTSTRUCT) and DICOM segmentation (SEG) files. - - .. note:: - Only DICOM images contain metadata concerning mask modality. Masks from other file types are interpreted as - ``"generic_mask"`` by default and cannot be filtered using ``mask_modality``. - - .. note:: - Since version ``2.1.0`` MIRP does not require that images and masks have the exact same dimensions, origin, - spacing and orientation, with the exception of numpy images and masks. This is explicitly true for DICOM - radiotherapy structure (RTSTRUCT) sets. These are either mapped to the corresponding image if image slices - are referenced in the structure set, or use internal data to generate a voxel-based mask. However, images and - their masks should share the same frame of reference. - -* Select the specific regions of interest using ``roi_name``: - A mask file may contain multiple masks. By default, MIRP will assess all masks in a file. The ``roi_name`` argument - can be used to specify the list of regions of interest that should be assessed. For DICOM mask files, names of - regions of interest are provided in the metadata. For other mask file types, masks are either boolean, or - non-negative integers. For these, ``False`` or ``0`` are interpreted as background, and not assessed. If, for - example, regions of interest are labelled with ``1``, ``2`` and ``3``, MIRP will recognize both - ``roi_name=["1", "2", "3"]`` and ``roi_name=["region_1", "region_2", "region_3"]``. - - You can use the :func:`~mirp.extractMaskLabels.extract_mask_labels` function to identify the names of the regions - of interest in mask files. - -API documentation ------------------ - -.. note:: The :func:`~mirp.importData.importImageAndMask.import_image_and_mask` function is called internally by other - functions. 
These function pass through keyword arguments to - :func:`~mirp.data_import.import_image_and_mask.import_image_and_mask`. - -.. autofunction:: mirp.data_import.import_image_and_mask.import_image_and_mask - -.. autofunction:: mirp.utilities.config_utilities.get_data_xml diff --git a/docs/_sources/mirp.settings.rst.txt b/docs/_sources/mirp.settings.rst.txt deleted file mode 100644 index 3539cba0..00000000 --- a/docs/_sources/mirp.settings.rst.txt +++ /dev/null @@ -1,82 +0,0 @@ -Configure the image processing and feature extraction workflow -============================================================== - -MIRP implements the standardised image processing and feature extraction workflow recommended by the Image Biomarker -Standardization Initiative. Many aspects of this workflow can be configured. This can be done in several ways: - -* Using keyword arguments. The keyword arguments match the parameters used to initialise the various settings objects - documented below. -* By creating a :class:`~mirp.settings.generic.SettingsClass` object. This object can be initialised using the - same keyword arguments as above. Alternatively, the attributes of the - :class:`~mirp.settings.generic.SettingsClass` can be filled with the specific objects documented below. -* By specifying the configuration in a stand-alone settings ``xml`` file. An empty copy of the ``xml`` file can be - created using :func:`~mirp.utilities.config_utilities.get_settings_xml`. - -General settings ----------------- - -.. automodule:: mirp.settings.general_parameters - :members: - :no-undoc-members: - :show-inheritance: - -Image processing settings -------------------------- - -.. automodule:: mirp.settings.image_processing_parameters - :members: - :no-undoc-members: - :show-inheritance: - -Image perturbation settings ---------------------------- - -.. automodule:: mirp.settings.perturbation_parameters - :members: - :no-undoc-members: - :show-inheritance: - -Image interpolation settings ----------------------------- - -.. automodule:: mirp.settings.interpolation_parameters - :members: - :no-undoc-members: - :show-inheritance: - -Mask resegmentation settings ----------------------------- - -.. automodule:: mirp.settings.resegmentation_parameters - :members: - :no-undoc-members: - :show-inheritance: - -Feature computation settings ----------------------------- - -.. automodule:: mirp.settings.feature_parameters - :members: - :no-undoc-members: - :show-inheritance: - -Image transformation settings ------------------------------ - -.. automodule:: mirp.settings.transformation_parameters - :members: - :no-undoc-members: - :show-inheritance: - -Generic settings object ------------------------ - -.. automodule:: mirp.settings.generic - :members: - :no-undoc-members: - :show-inheritance: - -Creating a settings xml file ----------------------------- - -.. autofunction:: mirp.utilities.config_utilities.get_settings_xml diff --git a/docs/_sources/quick_start.rst.txt b/docs/_sources/quick_start.rst.txt deleted file mode 100644 index 8135f111..00000000 --- a/docs/_sources/quick_start.rst.txt +++ /dev/null @@ -1,178 +0,0 @@ -Tutorial -=========== - -This tutorial describes a - -Installing MIRP ---------------- - -First you need to install MIRP. - -Before you begin, you need to: - 1. Install MIRP (see :ref:`installation`). - 2. Have a dataset with imaging and corresponding masks. 
-
-Computing quantitative features
--------------------------------
-Suppose you have a dataset of computed tomography (CT) DICOM images with corresponding segmentation masks that you want
-to use to compute quantitative features from. Now, suppose that both images and masks are separated by patient
-directories within a general ``path/to/data`` folder. For each patient, the CT image is in the ``image`` directory,
-and its corresponding segmentation in ``mask``. For patient ``patient_003``, the full path to the
-image directory is ``path/to/data/patient_003/image``, and to the mask directory is ``path/to/data/patient_003/mask``.
-
-We want to compute features from a pre-defined gross tumour mask (called ``GTV``). We are interested in the soft-tissue
-range, with Hounsfield Units between -150 and 200 HU. To harmonise differences in resolution and slice distance
-between CT images from different patients, all voxels are resampled to a 1.0 by 1.0 by 1.0 mm size. Histogram and
-texture features are computed after discretisation using the `fixed bin size` method with a bin size of 25 Hounsfield
-Units.
-
-MIRP can compute quantitative features using the function call below:
-
-.. code-block:: python
-
-    import pandas as pd
-    from mirp import extract_features
-
-    feature_data = extract_features(
-        image="path/to/data",
-        mask="path/to/data",
-        image_sub_folder="image",
-        mask_sub_folder="mask",
-        roi_name="GTV",
-        new_spacing=1.0,
-        resegmentation_intensity_range=[-150.0, 200.0],
-        base_discretisation_method="fixed_bin_size",
-        base_discretisation_bin_width=25.0
-    )
-
-The above code results in ``feature_data``, which is a list of ``pandas.DataFrame`` objects that contain feature values
-for every patient. These can be combined into a single ``pandas.DataFrame`` as follows:
-
-.. code-block:: python
-
-    feature_data = pd.concat(feature_data)
-
-Visualising filtered images
----------------------------
-Image filters enhance aspects such as edges, blobs and directional structures. MIRP supports several filters (see
-:ref:`quantitative_image_analysis`). Suppose you want to use a Laplacian-of-Gaussian filter, with the width of the
-Gaussian equal to 2.0 mm.
-
-We can first inspect the images visually using ``extract_images``. By default, ``extract_images`` exports images and
-masks as dictionaries with ``numpy`` data and metadata (or as NIfTI files, in case ``write_dir`` is provided). These
-can be used with external viewers, or your own scripts. MIRP also has a simple viewer for its internal native
-image objects. To use this viewer, you can set ``image_export_format = "native"``.
-
-.. code-block:: python
-
-    from mirp import extract_images
-
-    images = extract_images(
-        image="path/to/data",
-        mask="path/to/data",
-        image_sub_folder="image",
-        mask_sub_folder="mask",
-        roi_name="GTV",
-        new_spacing=1.0,
-        resegmentation_intensity_range=[-150.0, 200.0],
-        filter_kernels="laplacian_of_gaussian",
-        laplacian_of_gaussian_sigma=2.0,
-        image_export_format="native"
-    )
-
-Here, ``images`` is a list of images and masks, with one entry for each patient. Each entry consists of two nested
-lists, one for images and the second for masks. In this case, the nested list of images contains two entries, and
-that of masks only one (for the ``GTV`` region of interest). The first image is the CT image, after interpolation to
-1.0 by 1.0 by 1.0 mm voxels. The second image is the Laplacian-of-Gaussian filtered image. Each image can be viewed
-using the ``show`` method:
-
-.. 
code-block:: python - patient_1_images, patient_1_mask = images[0] - patient_1_ct_image, patient_1_log_image = patient_1_images - - # View the CT image - patient_1_ct_image.show() - - # View the Laplacian-of-Gaussian filtered image - patient_1_log_image.show() - -Computing quantitative features from filtered images ----------------------------------------------------- - -Of course, features can also be computed from filtered images (also known as response maps). By default, only -statistical features [Zwanenburg2016]_ are computed from filtered images. - -.. code-block:: python - - feature_data = extract_features( - image="path/to/data", - mask="path/to/data", - image_sub_folder="image", - mask_sub_folder="mask", - roi_name="GTV", - new_spacing=1.0, - resegmentation_intensity_range=[-150.0, 200.0], - base_discretisation_method="fixed_bin_size", - base_discretisation_bin_width=25.0, - filter_kernels="laplacian_of_gaussian", - laplacian_of_gaussian_sigma=2.0 - ) - - feature_data = pd.concat(feature_data) - -``feature_data`` is a ``pandas.DataFrame`` similar to the first example, but with features computed from the -Laplacian-of-Gaussian image appended as new columns. - -Processing images ------------------ - -Computing quantitative features is nice, but what if you use deep learning instead? Suppose you just want to process -images as input for a VGG16 network [Simonyan2015]_. These networks have a default input size of 224 by 224 pixels. -For many deep learning applications, images should be provided 1-by-1, and we therefore will use a generator. By -providing the name of the region of interest, images will be cropped based on the center of the mask: - -.. code-block:: python - - from mirp import deep_learning_preprocessing_generator - - image_generator = deep_learning_preprocessing_generator( - image="path/to/data", - mask="path/to/data", - image_sub_folder="image", - mask_sub_folder="mask", - roi_name="GTV", - new_spacing=1.0, - crop_size=[224, 224], - output_slices=True - ) - - image_slices, mask_slices = next(image_generator) - -The generator yields a set of image slices (each 224 by 224 pixels) with corresponding masks. ``output_slices=False`` -can be used to generate 3D volumes. - -Extracting metadata -------------------- - -DICOM files contains metadata that are relevant to report in studies. MIRP can extract and collect such metadata: - -.. code-block:: python - - from mirp import extract_image_parameters - - image_parameters = extract_image_parameters( - image="path/to/data", - image_sub_folder="image" - ) - -``image_parameters`` is a ``pandas.DataFrame`` that contains relevant parameters extracted from DICOM metadata, such -as image resolution, scanner type and vendor as well as modality-specific attributes such as tube voltage for CT. -Note that metadata for other file types (e.g. NIfTI) are considerably more limited. - -References ----------- - -.. [Simonyan2015] Simonyan K, Zisserman A. Very Deep Convolutional Networks for Large-Scale Image Recognition. arXiv - [cs.CV] 2014. doi:10.48550/arXiv.1409.1556 -.. [Zwanenburg2016] Zwanenburg A, Leger S, Vallieres M, Loeck S. Image Biomarker Standardisation Initiative. arXiv - [cs.CV] 2016. 
doi:10.48550/arXiv.1612.070035 diff --git a/docs/_sources/tutorial_apply_image_filter.ipynb.txt b/docs/_sources/tutorial_apply_image_filter.ipynb.txt index 92d1fa5d..55e2b357 100644 --- a/docs/_sources/tutorial_apply_image_filter.ipynb.txt +++ b/docs/_sources/tutorial_apply_image_filter.ipynb.txt @@ -5,7 +5,6 @@ "id": "1e447b05-08e1-4326-9709-531acf639a69", "metadata": { "editable": true, - "jp-MarkdownHeadingCollapsed": true, "slideshow": { "slide_type": "" }, @@ -164,7 +163,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "INFO\t: MainProcess \t 2024-04-24 11:26:22,645 \t Initialising image extraction using ct images for 1.\n" + "INFO\t: MainProcess \t 2024-06-05 11:52:36,662 \t Initialising image extraction using ct images for 1.\n" ] } ], @@ -349,7 +348,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "INFO\t: MainProcess \t 2024-04-24 11:26:24,360 \t Initialising image extraction using ct images for 1.\n" + "INFO\t: MainProcess \t 2024-06-05 11:52:38,720 \t Initialising image extraction using ct images for 1.\n" ] } ], @@ -596,7 +595,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 15, "id": "c211ca72-ad62-491e-86ac-b30ac25ad6b4", "metadata": {}, "outputs": [ @@ -604,8 +603,117 @@ "name": "stdout", "output_type": "stream", "text": [ - "INFO\t: MainProcess \t 2024-04-24 11:26:40,460 \t Initialising feature computation using ct images for 1.\n" + "INFO\t: MainProcess \t 2024-06-05 11:52:54,562 \t Initialising feature computation using ct images for 1.\n" ] + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
sample_nameimage_file_nameimage_directoryimage_study_dateimage_study_descriptionimage_series_descriptionimage_series_instance_uidimage_modalityimage_pet_suv_typeimage_mask_label...wavelet_simoncelli_level_3_stat_maxwavelet_simoncelli_level_3_stat_iqrwavelet_simoncelli_level_3_stat_rangewavelet_simoncelli_level_3_stat_madwavelet_simoncelli_level_3_stat_rmadwavelet_simoncelli_level_3_stat_medadwavelet_simoncelli_level_3_stat_covwavelet_simoncelli_level_3_stat_qcodwavelet_simoncelli_level_3_stat_energywavelet_simoncelli_level_3_stat_rms
01Nonechest_ct\\imageNoneNoneNone1.3.6.1.4.1.9590.100.1.2.296658988911737913102...ctNoneGTV-1...531.40425477.65685863.02098461.34911535.817258.1144053.1157912.0458652.349373e+0988.482671
\n", + "

1 rows × 311 columns

\n", + "
" + ], + "text/plain": [ + " sample_name image_file_name image_directory image_study_date \\\n", + "0 1 None chest_ct\\image None \n", + "\n", + " image_study_description image_series_description \\\n", + "0 None None \n", + "\n", + " image_series_instance_uid image_modality \\\n", + "0 1.3.6.1.4.1.9590.100.1.2.296658988911737913102... ct \n", + "\n", + " image_pet_suv_type image_mask_label ... \\\n", + "0 None GTV-1 ... \n", + "\n", + " wavelet_simoncelli_level_3_stat_max wavelet_simoncelli_level_3_stat_iqr \\\n", + "0 531.404254 77.65685 \n", + "\n", + " wavelet_simoncelli_level_3_stat_range wavelet_simoncelli_level_3_stat_mad \\\n", + "0 863.020984 61.349115 \n", + "\n", + " wavelet_simoncelli_level_3_stat_rmad wavelet_simoncelli_level_3_stat_medad \\\n", + "0 35.8172 58.114405 \n", + "\n", + " wavelet_simoncelli_level_3_stat_cov wavelet_simoncelli_level_3_stat_qcod \\\n", + "0 3.115791 2.045865 \n", + "\n", + " wavelet_simoncelli_level_3_stat_energy wavelet_simoncelli_level_3_stat_rms \n", + "0 2.349373e+09 88.482671 \n", + "\n", + "[1 rows x 311 columns]" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" } ], "source": [ diff --git a/docs/_sources/tutorial_compute_radiomics_features_mr.ipynb.txt b/docs/_sources/tutorial_compute_radiomics_features_mr.ipynb.txt index 2f3e0077..24af350a 100644 --- a/docs/_sources/tutorial_compute_radiomics_features_mr.ipynb.txt +++ b/docs/_sources/tutorial_compute_radiomics_features_mr.ipynb.txt @@ -268,7 +268,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "INFO\t: MainProcess \t 2024-04-24 11:24:55,664 \t Initialising image extraction using mr images for STS_003.\n" + "INFO\t: MainProcess \t 2024-06-05 11:55:22,262 \t Initialising image extraction using mr images for STS_003.\n" ] } ], @@ -529,9 +529,9 @@ "name": "stdout", "output_type": "stream", "text": [ - "INFO\t: MainProcess \t 2024-04-24 11:24:57,113 \t Initialising feature computation using mr images for STS_001.\n", - "INFO\t: MainProcess \t 2024-04-24 11:25:10,524 \t Initialising feature computation using mr images for STS_002.\n", - "INFO\t: MainProcess \t 2024-04-24 11:25:19,984 \t Initialising feature computation using mr images for STS_003.\n" + "INFO\t: MainProcess \t 2024-06-05 11:55:23,768 \t Initialising feature computation using mr images for STS_001.\n", + "INFO\t: MainProcess \t 2024-06-05 11:55:36,474 \t Initialising feature computation using mr images for STS_002.\n", + "INFO\t: MainProcess \t 2024-06-05 11:55:46,159 \t Initialising feature computation using mr images for STS_003.\n" ] }, { @@ -556,15 +556,15 @@ " \n", " \n", " sample_name\n", - " image_settings_id\n", + " image_file_name\n", + " image_directory\n", + " image_study_date\n", + " image_study_description\n", + " image_series_description\n", + " image_series_instance_uid\n", " image_modality\n", - " image_voxel_size_x\n", - " image_voxel_size_y\n", - " image_voxel_size_z\n", - " image_noise_level\n", - " image_noise_iteration_id\n", - " image_rotation_angle\n", - " image_translation_x\n", + " image_pet_suv_type\n", + " image_mask_label\n", " ...\n", " ngl_hdhge_d1_a0.0_2d_fbn_n16\n", " ngl_glnu_d1_a0.0_2d_fbn_n16\n", @@ -582,15 +582,15 @@ " \n", " 0\n", " STS_001\n", - " \n", + " None\n", + " sts_images\\STS_001\\mr_t1\\image\n", + " 20000903\n", + " ^THIGH\n", + " AXIAL SE T1 - RESEARCH\n", + " 1.3.6.1.4.1.14519.5.2.1.5168.1900.236909650266...\n", " mr\n", - " 1.0\n", - " 1.0\n", - " 5.2\n", - " 0.0\n", - " NaN\n", - " 0.0\n", - " 0.0\n", + " None\n", + " GTV_Mass\n", " 
...\n", " 649.904779\n", " 685.643163\n", @@ -606,15 +606,15 @@ " \n", " 0\n", " STS_002\n", - " \n", + " None\n", + " sts_images\\STS_002\\mr_t1\\image\n", + " 20060103\n", + " L-SPINE\n", + " AXT1\n", + " 1.3.6.1.4.1.14519.5.2.1.5168.1900.103003228939...\n", " mr\n", - " 1.0\n", - " 1.0\n", - " 7.0\n", - " 0.0\n", - " NaN\n", - " 0.0\n", - " 0.0\n", + " None\n", + " GTV_Mass\n", " ...\n", " 731.317187\n", " 232.044296\n", @@ -630,15 +630,15 @@ " \n", " 0\n", " STS_003\n", - " \n", + " None\n", + " sts_images\\STS_003\\mr_t1\\image\n", + " 20050330\n", + " MRI PELVIS C- C\n", + " AX T1\n", + " 1.3.6.1.4.1.14519.5.2.1.5168.1900.900154069973...\n", " mr\n", - " 1.0\n", - " 1.0\n", - " 7.0\n", - " 0.0\n", - " NaN\n", - " 0.0\n", - " 0.0\n", + " None\n", + " GTV_Mass\n", " ...\n", " 608.689328\n", " 472.701740\n", @@ -653,51 +653,56 @@ " \n", " \n", "\n", - "

3 rows × 192 columns

\n", + "

3 rows × 203 columns

\n", "" ], "text/plain": [ - " sample_name image_settings_id image_modality image_voxel_size_x \\\n", - "0 STS_001 mr 1.0 \n", - "0 STS_002 mr 1.0 \n", - "0 STS_003 mr 1.0 \n", + " sample_name image_file_name image_directory \\\n", + "0 STS_001 None sts_images\\STS_001\\mr_t1\\image \n", + "0 STS_002 None sts_images\\STS_002\\mr_t1\\image \n", + "0 STS_003 None sts_images\\STS_003\\mr_t1\\image \n", + "\n", + " image_study_date image_study_description image_series_description \\\n", + "0 20000903 ^THIGH AXIAL SE T1 - RESEARCH \n", + "0 20060103 L-SPINE AXT1 \n", + "0 20050330 MRI PELVIS C- C AX T1 \n", "\n", - " image_voxel_size_y image_voxel_size_z image_noise_level \\\n", - "0 1.0 5.2 0.0 \n", - "0 1.0 7.0 0.0 \n", - "0 1.0 7.0 0.0 \n", + " image_series_instance_uid image_modality \\\n", + "0 1.3.6.1.4.1.14519.5.2.1.5168.1900.236909650266... mr \n", + "0 1.3.6.1.4.1.14519.5.2.1.5168.1900.103003228939... mr \n", + "0 1.3.6.1.4.1.14519.5.2.1.5168.1900.900154069973... mr \n", "\n", - " image_noise_iteration_id image_rotation_angle image_translation_x ... \\\n", - "0 NaN 0.0 0.0 ... \n", - "0 NaN 0.0 0.0 ... \n", - "0 NaN 0.0 0.0 ... \n", + " image_pet_suv_type image_mask_label ... ngl_hdhge_d1_a0.0_2d_fbn_n16 \\\n", + "0 None GTV_Mass ... 649.904779 \n", + "0 None GTV_Mass ... 731.317187 \n", + "0 None GTV_Mass ... 608.689328 \n", "\n", - " ngl_hdhge_d1_a0.0_2d_fbn_n16 ngl_glnu_d1_a0.0_2d_fbn_n16 \\\n", - "0 649.904779 685.643163 \n", - "0 731.317187 232.044296 \n", - "0 608.689328 472.701740 \n", + " ngl_glnu_d1_a0.0_2d_fbn_n16 ngl_glnu_norm_d1_a0.0_2d_fbn_n16 \\\n", + "0 685.643163 0.202679 \n", + "0 232.044296 0.208755 \n", + "0 472.701740 0.345461 \n", "\n", - " ngl_glnu_norm_d1_a0.0_2d_fbn_n16 ngl_dcnu_d1_a0.0_2d_fbn_n16 \\\n", - "0 0.202679 573.379973 \n", - "0 0.208755 212.059230 \n", - "0 0.345461 334.257475 \n", + " ngl_dcnu_d1_a0.0_2d_fbn_n16 ngl_dcnu_norm_d1_a0.0_2d_fbn_n16 \\\n", + "0 573.379973 0.164149 \n", + "0 212.059230 0.183419 \n", + "0 334.257475 0.238191 \n", "\n", - " ngl_dcnu_norm_d1_a0.0_2d_fbn_n16 ngl_dc_perc_d1_a0.0_2d_fbn_n16 \\\n", - "0 0.164149 1.0 \n", - "0 0.183419 1.0 \n", - "0 0.238191 1.0 \n", + " ngl_dc_perc_d1_a0.0_2d_fbn_n16 ngl_gl_var_d1_a0.0_2d_fbn_n16 \\\n", + "0 1.0 2.124407 \n", + "0 1.0 3.301315 \n", + "0 1.0 3.232402 \n", "\n", - " ngl_gl_var_d1_a0.0_2d_fbn_n16 ngl_dc_var_d1_a0.0_2d_fbn_n16 \\\n", - "0 2.124407 2.835808 \n", - "0 3.301315 5.169630 \n", - "0 3.232402 5.422279 \n", + " ngl_dc_var_d1_a0.0_2d_fbn_n16 ngl_dc_entr_d1_a0.0_2d_fbn_n16 \\\n", + "0 2.835808 5.207281 \n", + "0 5.169630 5.048137 \n", + "0 5.422279 4.222418 \n", "\n", - " ngl_dc_entr_d1_a0.0_2d_fbn_n16 ngl_dc_energy_d1_a0.0_2d_fbn_n16 \n", - "0 5.207281 0.034652 \n", - "0 5.048137 0.050158 \n", - "0 4.222418 0.132661 \n", + " ngl_dc_energy_d1_a0.0_2d_fbn_n16 \n", + "0 0.034652 \n", + "0 0.050158 \n", + "0 0.132661 \n", "\n", - "[3 rows x 192 columns]" + "[3 rows x 203 columns]" ] }, "execution_count": 7, @@ -760,7 +765,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.4" + "version": "3.11.8" } }, "nbformat": 4, diff --git a/docs/_static/documentation_options.js b/docs/_static/documentation_options.js index d9603dae..e21a8f99 100644 --- a/docs/_static/documentation_options.js +++ b/docs/_static/documentation_options.js @@ -1,5 +1,5 @@ const DOCUMENTATION_OPTIONS = { - VERSION: '2.2.2', + VERSION: '2.2.3', LANGUAGE: 'en', COLLAPSE_INDEX: false, BUILDER: 'html', diff --git a/docs/configuration.html b/docs/configuration.html 
index ec23ea9c..75eae56c 100644 --- a/docs/configuration.html +++ b/docs/configuration.html @@ -4,7 +4,7 @@ - Configure the image processing and feature extraction workflow — mirp 2.2.2 documentation + Configure the image processing and feature extraction workflow — mirp 2.2.3 documentation @@ -15,7 +15,7 @@ - + @@ -53,9 +53,7 @@

Tutorials

Deep Dive